From db813b3f8994bedd7f945b70b168b42128d66209 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 15 Jul 2024 08:20:48 -0400 Subject: [PATCH 001/104] wip deployer script --- deploy.rb | 154 ++++++++++++++++++++++++ deployer.Dockerfile | 10 ++ deployer.Dockerfile.dockerignore | 4 + internal/command/launch/cmd.go | 26 ++++ internal/command/launch/generate.go | 70 +++++++++++ internal/command/launch/launch.go | 27 ++++- internal/command/launch/plan_builder.go | 21 ++++ internal/command/launch/state.go | 6 +- 8 files changed, 312 insertions(+), 6 deletions(-) create mode 100755 deploy.rb create mode 100644 deployer.Dockerfile create mode 100644 deployer.Dockerfile.dockerignore create mode 100644 internal/command/launch/generate.go diff --git a/deploy.rb b/deploy.rb new file mode 100755 index 0000000000..9a351f1825 --- /dev/null +++ b/deploy.rb @@ -0,0 +1,154 @@ +#!/usr/bin/ruby + +require 'json' +require 'time' +require 'open3' + +LOG_PREFIX = ENV["LOG_PREFIX"] + +module Step + ROOT = :__root__ + GIT_PULL = :git_pull + PLAN = :plan + DEPLOY = :deploy +end + +$current_step = Step::ROOT + +$counter = 0 +$counter_mutex = Mutex.new + +def id + $counter_mutex.synchronize do + $counter += 1 + $counter + end +end + +$start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + +def elapsed + Process.clock_gettime(Process::CLOCK_MONOTONIC) - $start +end + +def nputs(type:, payload: nil) + obj = { id: id(), step: $current_step, type: type, time: elapsed(), payload: payload }.compact + puts "#{LOG_PREFIX}#{obj.to_json}" +end + +# prefixed events +def event(name, meta = nil) + nputs(type: "event:#{name}", payload: meta) +end + +def artifact(name, body) + nputs(type: "artifact:#{name}", payload: body) +end + +def log(level, msg) + nputs(type: "log:#{level}", payload: msg) +end + +def info(msg) + log("info", msg) +end + +def debug(msg) + log("debug", msg) +end + +def error(msg) + log("error", msg) +end + +def exec_capture(cmd) + event :exec, { cmd: cmd } + + out_mutex = Mutex.new + output = "" + + status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| + pid = wait_thr.pid + + stdin.close_write + + threads = [[stdout, "stdout"], [stderr, "stderr"]].map do |stream, stream_name| + Thread.new do + stream.each_line do |line| + nputs type: stream_name, payload: line.chomp + out_mutex.synchronize { output += line } + end + end + end + + threads.each { |thr| thr.join } + + wait_thr.value + end + + if !status.success? 
+ event :error, { type: :exec, message: "unsuccessful command '#{cmd}'", exit_code: status.exitstatus, pid: status.pid } + exit 1 + end + + output +end + +def in_step(step, &block) + old_step = $current_step + $current_step = step + event :start + begin + ret = yield block + rescue StandardError => e + event :error, { type: :uncaught, message: e } + exit 1 + end + event :end + $current_step = old_step +end + +def ts + Time.now.utc.iso8601(6) +end + +event :start, { ts: ts() } + +if (git_repo = ENV["GIT_REPO"]) && !!git_repo + in_step Step::GIT_PULL do + `git config --global init.defaultBranch main` + ref = ENV["GIT_REF"] + artifact :git_info, { repository: git_repo, reference: ref } + exec_capture("git init") + exec_capture("git remote add origin #{git_repo}") + ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'").chomp if !ref + exec_capture("git -c protocol.version=2 fetch origin #{ref}") + exec_capture("git reset --hard --recurse-submodules FETCH_HEAD") + head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'")) + artifact :git_head, head + end +end + +in_step Step::PLAN do + exec_capture("flyctl launch generate -a my-app-name -o personal") + artifact :manifest, JSON.parse(File.read("manifest.json")) +end + +# in_step Step::DEPLOY do +# exec_capture("flyctl launch --from-manifest manifest.json") +# end + +# fly_json = begin +# JSON.parse(File.read("fly.json")) +# rescue e +# event Step::ERROR, { type: :parse, message: "could not parse fly.json: #{e}" } +# exit 1 +# end + +# event Step::PREPARE, { config: fly_json } + +# event Step::DEPLOY +# out = exec_capture("flyctl deploy -y") +# event Step::DEPLOY + +event :end, { ts: ts() } \ No newline at end of file diff --git a/deployer.Dockerfile b/deployer.Dockerfile new file mode 100644 index 0000000000..3a662e25e0 --- /dev/null +++ b/deployer.Dockerfile @@ -0,0 +1,10 @@ +FROM debian:bookworm + +RUN apt update && apt install -y --no-install-recommends ruby git + +COPY bin/flyctl /usr/local/bin/flyctl +COPY deploy.rb /deploy.rb + +WORKDIR /usr/src/app + +CMD ["/deploy.rb"] \ No newline at end of file diff --git a/deployer.Dockerfile.dockerignore b/deployer.Dockerfile.dockerignore new file mode 100644 index 0000000000..919e86fa52 --- /dev/null +++ b/deployer.Dockerfile.dockerignore @@ -0,0 +1,4 @@ +* + +!/bin/flyctl +!deploy.rb \ No newline at end of file diff --git a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index 13c0c155ee..fd6e10df79 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -106,8 +106,24 @@ func New() (cmd *cobra.Command) { Name: "yaml", Description: "Generate configuration in YAML format", }, + // don't try to generate a name + flag.Bool{ + Name: "force-name", + Description: "Force app name supplied by --name", + Default: false, + Hidden: true, + }, + // like reuse-app, but non-legacy! + flag.Bool{ + Name: "no-create-app", + Description: "Do not create an app", + Default: false, + Hidden: true, + }, ) + cmd.AddCommand(newGenerate()) + return } @@ -216,6 +232,16 @@ func run(ctx context.Context) (err error) { return err } + if launchManifest != nil { + // we loaded a manifest... 
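+ // reuse its config and mark the app-name validation and HA warning as
+ // already handled so the launch flow does not repeat those prompts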
+ cache = &planBuildCache{ + appConfig: launchManifest.Config, + sourceInfo: nil, + appNameValidated: true, + warnedNoCcHa: true, + } + } + // "--from" arg handling ctx, err = setupFromTemplate(ctx) if err != nil { diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go new file mode 100644 index 0000000000..f77511f927 --- /dev/null +++ b/internal/command/launch/generate.go @@ -0,0 +1,70 @@ +package launch + +import ( + "context" + "encoding/json" + "os" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +func newGenerate() *cobra.Command { + genDesc := "generates a launch manifest, including a config" + cmd := command.New("generate", genDesc, genDesc, runGenerate, + command.RequireAppName, + command.LoadAppConfigIfPresent, + ) + + flag.Add(cmd, + flag.App(), + flag.Region(), + flag.Org(), + flag.AppConfig(), + flag.Bool{ + Name: "ha", + Description: "Create spare machines that increases app availability", + Default: false, + }, + flag.String{ + Name: "manifest-path", + Shorthand: "p", + Description: "Path to write the manifest to", + Default: "manifest.json", + }, + ) + + // not that useful anywhere else yet + cmd.Hidden = true + + return cmd +} + +func runGenerate(ctx context.Context) error { + ctx = context.WithValue(ctx, genContextKey{}, true) + + recoverableErrors := recoverableErrorBuilder{canEnterUi: false} + launchManifest, _, err := buildManifest(ctx, &recoverableErrors) + if err != nil { + return err + } + + file, err := os.Create(flag.GetString(ctx, "manifest-path")) + if err != nil { + return err + } + defer file.Close() + + jsonEncoder := json.NewEncoder(file) + jsonEncoder.SetIndent("", " ") + + return jsonEncoder.Encode(launchManifest) +} + +type genContextKey struct{} + +func isGenerate(ctx context.Context) bool { + v, ok := ctx.Value(genContextKey{}).(bool) + return ok && v +} diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index 1b1dae6369..79aaee537f 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -47,12 +47,22 @@ func (state *launchState) Launch(ctx context.Context) error { state.warnedNoCcHa = true } - app, err := state.createApp(ctx) - if err != nil { - return err + var app *fly.App + if flag.GetBool(ctx, "no-create-app") { + fmt.Fprintf(io.Out, "app config: %+v\n", state.appConfig) + + app, err = state.getApp(ctx) + if err != nil { + return err + } + } else { + app, err = state.createApp(ctx) + if err != nil { + return err + } + fmt.Fprintf(io.Out, "Created app '%s' in organization '%s'\n", app.Name, app.Organization.Slug) } - fmt.Fprintf(io.Out, "Created app '%s' in organization '%s'\n", app.Name, app.Organization.Slug) fmt.Fprintf(io.Out, "Admin URL: https://fly.io/apps/%s\n", app.Name) fmt.Fprintf(io.Out, "Hostname: %s.fly.dev\n", app.Name) @@ -192,3 +202,12 @@ func (state *launchState) createApp(ctx context.Context) (*fly.App, error) { return app, nil } + +func (state *launchState) getApp(ctx context.Context) (*fly.App, error) { + apiClient := flyutil.ClientFromContext(ctx) + app, err := apiClient.GetApp(ctx, state.Plan.AppName) + if err != nil { + return nil, err + } + return app, nil +} diff --git a/internal/command/launch/plan_builder.go b/internal/command/launch/plan_builder.go index 2ab5a5a239..2f19333488 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -19,6 +19,7 @@ import ( "github.com/superfly/flyctl/internal/cmdutil" 
"github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/flagnames" "github.com/superfly/flyctl/internal/flyerr" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/haikunator" @@ -249,9 +250,12 @@ func buildManifest(ctx context.Context, recoverableErrors *recoverableErrorBuild } } + appConfig.AppName = lp.AppName + return &LaunchManifest{ Plan: lp, PlanSource: planSource, + Config: appConfig, }, buildCache, nil } @@ -395,6 +399,7 @@ func stateFromManifest(ctx context.Context, m LaunchManifest, optionalCache *pla LaunchManifest: LaunchManifest{ m.Plan, m.PlanSource, + appConfig, }, env: envVars, planBuildCache: planBuildCache{ @@ -512,6 +517,16 @@ func determineAppName(ctx context.Context, appConfig *appconfig.Config, configPa appName := flag.GetString(ctx, "name") cause := "specified on the command line" + if flag.GetBool(ctx, "force-name") { + if appName == "" { + return "", "", flyerr.GenericErr{ + Err: "app name required when using --force-name", + Suggest: "Specify the app name with the --name flag", + } + } + return appName, cause, nil + } + if !flag.GetBool(ctx, "generate-name") { // --generate-name wasn't specified, so we try to get a name from the config file or directory name. if appName == "" { @@ -570,6 +585,12 @@ func appNameTaken(ctx context.Context, name string) (bool, error) { // determineOrg returns the org specified on the command line, or the personal org if left unspecified func determineOrg(ctx context.Context) (*fly.Organization, string, error) { + if isGenerate(ctx) { + if slug := flag.GetString(ctx, flagnames.Org); slug != "" { + return &fly.Organization{Slug: slug}, "specified as flag", nil + } + } + client := flyutil.ClientFromContext(ctx) orgs, err := client.GetOrganizations(ctx) diff --git a/internal/command/launch/state.go b/internal/command/launch/state.go index 7253b46ffd..1d01e6b0ee 100644 --- a/internal/command/launch/state.go +++ b/internal/command/launch/state.go @@ -10,6 +10,7 @@ import ( "github.com/samber/lo" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/internal/appconfig" extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" @@ -31,8 +32,9 @@ type launchPlanSource struct { } type LaunchManifest struct { - Plan *plan.LaunchPlan - PlanSource *launchPlanSource + Plan *plan.LaunchPlan `json:"plan,omitempty"` + PlanSource *launchPlanSource `json:"plan_source,omitempty"` + Config *appconfig.Config `json:"config,omitempty"` } type launchState struct { From 8d8957d53fcd0d1ab16de9ea40af8d52670cedd6 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 15 Jul 2024 15:02:13 -0400 Subject: [PATCH 002/104] add app name and org slug speicifiers --- deploy.rb | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/deploy.rb b/deploy.rb index 9a351f1825..172a2ad3ee 100755 --- a/deploy.rb +++ b/deploy.rb @@ -114,6 +114,18 @@ def ts event :start, { ts: ts() } +APP_NAME = ENV["DEPLOY_APP_NAME"] +if !APP_NAME + event :error, { type: :validation, message: "missing app name" } + exit 1 +end + +ORG_SLUG = ENV["DEPLOY_ORG_SLUG"] +if !ORG_SLUG + event :error, { type: :validation, message: "missing organization slug" } + exit 1 +end + if (git_repo = ENV["GIT_REPO"]) && !!git_repo in_step Step::GIT_PULL do `git config --global init.defaultBranch main` @@ 
-130,7 +142,7 @@ def ts end in_step Step::PLAN do - exec_capture("flyctl launch generate -a my-app-name -o personal") + exec_capture("flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG}") artifact :manifest, JSON.parse(File.read("manifest.json")) end From f34c9e967fa1d5157373d431834ae71e9f0fb20b Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 17 Jul 2024 08:07:49 -0400 Subject: [PATCH 003/104] generate full config beforehand, if DEPLOY_NOW env is defined, run fly deploy with everything from the manifest --- deploy.rb | 37 +++++++++++++++-------------- internal/command/launch/generate.go | 2 ++ internal/command/launch/launch.go | 27 +++++++++++---------- 3 files changed, 35 insertions(+), 31 deletions(-) diff --git a/deploy.rb b/deploy.rb index 172a2ad3ee..f5d35587b3 100755 --- a/deploy.rb +++ b/deploy.rb @@ -98,14 +98,15 @@ def in_step(step, &block) old_step = $current_step $current_step = step event :start - begin - ret = yield block + ret = begin + yield block rescue StandardError => e event :error, { type: :uncaught, message: e } exit 1 end event :end $current_step = old_step + ret end def ts @@ -141,26 +142,26 @@ def ts end end -in_step Step::PLAN do - exec_capture("flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG}") - artifact :manifest, JSON.parse(File.read("manifest.json")) +manifest = in_step Step::PLAN do + exec_capture("flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json") + manifest = JSON.parse(File.read("/tmp/manifest.json")) + artifact :manifest, manifest end -# in_step Step::DEPLOY do -# exec_capture("flyctl launch --from-manifest manifest.json") -# end +puts manifest -# fly_json = begin -# JSON.parse(File.read("fly.json")) -# rescue e -# event Step::ERROR, { type: :parse, message: "could not parse fly.json: #{e}" } -# exit 1 -# end +if ENV["DEPLOY_NOW"] + in_step Step::DEPLOY do + vm_cpukind = manifest["plan"]["vm_cpukind"] + vm_cpus = manifest["plan"]["vm_cpus"] + vm_memory = manifest["plan"]["vm_memory"] + vm_size = manifest["plan"]["vm_size"] + region = manifest["plan"]["region"] -# event Step::PREPARE, { config: fly_json } + File.write("/tmp/fly.json", manifest["config"].to_json) -# event Step::DEPLOY -# out = exec_capture("flyctl deploy -y") -# event Step::DEPLOY + exec_capture("flyctl deploy -a #{APP_NAME} --region #{region} --vm-cpu-kind #{vm_cpukind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") + end +end event :end, { ts: ts() } \ No newline at end of file diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index f77511f927..4818a853de 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -50,6 +50,8 @@ func runGenerate(ctx context.Context) error { return err } + updateConfig(launchManifest.Plan, nil, launchManifest.Config) + file, err := os.Create(flag.GetString(ctx, "manifest-path")) if err != nil { return err diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index 79aaee537f..d3615224cf 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -9,6 +9,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flag/flagnames" "github.com/superfly/flyctl/internal/flapsutil" @@ -32,7 +33,7 @@ func (state *launchState) 
Launch(ctx context.Context) error { return err } - state.updateConfig(ctx) + updateConfig(state.Plan, state.env, state.appConfig) if err := state.validateExtensions(ctx); err != nil { return err @@ -153,15 +154,15 @@ func (state *launchState) updateComputeFromDeprecatedGuestFields(ctx context.Con } // updateConfig populates the appConfig with the plan's values -func (state *launchState) updateConfig(ctx context.Context) { - state.appConfig.AppName = state.Plan.AppName - state.appConfig.PrimaryRegion = state.Plan.RegionCode - if state.env != nil { - state.appConfig.SetEnvVariables(state.env) - } - if state.Plan.HttpServicePort != 0 { - if state.appConfig.HTTPService == nil { - state.appConfig.HTTPService = &appconfig.HTTPService{ +func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appconfig.Config) { + appConfig.AppName = plan.AppName + appConfig.PrimaryRegion = plan.RegionCode + if env != nil { + appConfig.SetEnvVariables(env) + } + if plan.HttpServicePort != 0 { + if appConfig.HTTPService == nil { + appConfig.HTTPService = &appconfig.HTTPService{ ForceHTTPS: true, AutoStartMachines: fly.Pointer(true), AutoStopMachines: fly.Pointer(true), @@ -169,11 +170,11 @@ func (state *launchState) updateConfig(ctx context.Context) { Processes: []string{"app"}, } } - state.appConfig.HTTPService.InternalPort = state.Plan.HttpServicePort + appConfig.HTTPService.InternalPort = plan.HttpServicePort } else { - state.appConfig.HTTPService = nil + appConfig.HTTPService = nil } - state.appConfig.Compute = state.Plan.Compute + appConfig.Compute = plan.Compute } // createApp creates the fly.io app for the plan From ed94877d1a8c6e0e83abce2b3a58d83bd5bf6103 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 17 Jul 2024 10:12:54 -0400 Subject: [PATCH 004/104] fix manifest return, don't specify region when deploying --- deploy.rb | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/deploy.rb b/deploy.rb index f5d35587b3..f6df989a26 100755 --- a/deploy.rb +++ b/deploy.rb @@ -146,21 +146,19 @@ def ts exec_capture("flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json") manifest = JSON.parse(File.read("/tmp/manifest.json")) artifact :manifest, manifest + manifest end -puts manifest - if ENV["DEPLOY_NOW"] in_step Step::DEPLOY do vm_cpukind = manifest["plan"]["vm_cpukind"] vm_cpus = manifest["plan"]["vm_cpus"] vm_memory = manifest["plan"]["vm_memory"] vm_size = manifest["plan"]["vm_size"] - region = manifest["plan"]["region"] File.write("/tmp/fly.json", manifest["config"].to_json) - exec_capture("flyctl deploy -a #{APP_NAME} --region #{region} --vm-cpu-kind #{vm_cpukind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") + exec_capture("flyctl deploy -a #{APP_NAME} --vm-cpu-kind #{vm_cpukind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") end end From a38be4ee12a0284bb48cdf193736cd167e4b8095 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Thu, 18 Jul 2024 11:08:34 -0400 Subject: [PATCH 005/104] support more env vars to configure the deployer, adjust fly launch generate --- deploy.rb | 43 +++++++++++++++++++++++----- internal/command/launch/generate.go | 9 ++++++ internal/command/launch/plan/plan.go | 2 +- 3 files changed, 46 insertions(+), 8 deletions(-) diff --git a/deploy.rb b/deploy.rb index f6df989a26..82fa7b5187 100755 --- a/deploy.rb +++ b/deploy.rb @@ -3,6 +3,7 @@ require 'json' require 'time' require 'open3' +require 'uri' LOG_PREFIX = 
ENV["LOG_PREFIX"] @@ -132,18 +133,46 @@ def ts `git config --global init.defaultBranch main` ref = ENV["GIT_REF"] artifact :git_info, { repository: git_repo, reference: ref } + exec_capture("git init") - exec_capture("git remote add origin #{git_repo}") + + git_repo_url = begin + URI(git_repo) + rescue StandardError => e + event :error, { type: :invalid_git_repo_url, message: e } + exit 1 + end + + if (user = ENV["GIT_URL_USER"]) && !!user + git_repo_url.user = user + end + + if (password = ENV["GIT_URL_PASSWORD"]) && !!password + git_repo_url.password = password + end + + exec_capture("git remote add origin #{git_repo_url.to_s}") + ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'").chomp if !ref + exec_capture("git -c protocol.version=2 fetch origin #{ref}") exec_capture("git reset --hard --recurse-submodules FETCH_HEAD") + head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'")) + artifact :git_head, head end end manifest = in_step Step::PLAN do - exec_capture("flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json") + cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" + if (region = ENV["DEPLOY_APP_REGION"]) && !!region + cmd += "--region #{region}" + end + if (internal_port = ENV["DEPLOY_APP_INTERNAL_PORT"]) && !!internal_port + cmd += "--internal-port #{internal_port}" + end + exec_capture(cmd) manifest = JSON.parse(File.read("/tmp/manifest.json")) artifact :manifest, manifest manifest @@ -151,14 +180,14 @@ def ts if ENV["DEPLOY_NOW"] in_step Step::DEPLOY do - vm_cpukind = manifest["plan"]["vm_cpukind"] - vm_cpus = manifest["plan"]["vm_cpus"] - vm_memory = manifest["plan"]["vm_memory"] - vm_size = manifest["plan"]["vm_size"] + vm_cpu_kind = ENV.fetch("DEPLOY_VM_CPU_KIND", manifest["plan"]["vm_cpu_kind"]) + vm_cpus = ENV.fetch("DEPLOY_VM_CPUS", manifest["plan"]["vm_cpus"]) + vm_memory = ENV.fetch("DEPLOY_VM_MEMORY", manifest["plan"]["vm_memory"]) + vm_size = ENV.fetch("DEPLOY_VM_SIZE", manifest["plan"]["vm_size"]) File.write("/tmp/fly.json", manifest["config"].to_json) - exec_capture("flyctl deploy -a #{APP_NAME} --vm-cpu-kind #{vm_cpukind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") + exec_capture("flyctl deploy -a #{APP_NAME} --vm-cpu-kind #{vm_cpu_kind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") end end diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index 4818a853de..102d50b864 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -22,6 +22,11 @@ func newGenerate() *cobra.Command { flag.Region(), flag.Org(), flag.AppConfig(), + flag.Int{ + Name: "internal-port", + Description: "Set internal_port for all services in the generated fly.toml", + Default: -1, + }, flag.Bool{ Name: "ha", Description: "Create spare machines that increases app availability", @@ -52,6 +57,10 @@ func runGenerate(ctx context.Context) error { updateConfig(launchManifest.Plan, nil, launchManifest.Config) + if n := flag.GetInt(ctx, "internal-port"); n > 0 { + launchManifest.Config.SetInternalPort(n) + } + file, err := os.Create(flag.GetString(ctx, "manifest-path")) if err != nil { return err diff --git a/internal/command/launch/plan/plan.go b/internal/command/launch/plan/plan.go index 74dbb46d21..8752c943b5 100644 --- 
a/internal/command/launch/plan/plan.go +++ b/internal/command/launch/plan/plan.go @@ -14,7 +14,7 @@ type LaunchPlan struct { HighAvailability bool `json:"ha"` // Deprecated: The UI currently returns this instead of Compute, but new development should use Compute. - CPUKind string `json:"vm_cpukind,omitempty"` + CPUKind string `json:"vm_cpu_kind,omitempty"` // Deprecated: The UI currently returns this instead of Compute, but new development should use Compute. CPUs int `json:"vm_cpus,omitempty"` // Deprecated: The UI currently returns this instead of Compute, but new development should use Compute. From d889fee11998546f3a2688d6e5b64a33858abd33 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 19 Jul 2024 14:14:08 -0400 Subject: [PATCH 006/104] fix spacing and types, set vm size options in config --- deploy.rb | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/deploy.rb b/deploy.rb index 82fa7b5187..a29e18a3bd 100755 --- a/deploy.rb +++ b/deploy.rb @@ -159,7 +159,7 @@ def ts exec_capture("git reset --hard --recurse-submodules FETCH_HEAD") head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'")) - + artifact :git_head, head end end @@ -167,27 +167,36 @@ def ts manifest = in_step Step::PLAN do cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" if (region = ENV["DEPLOY_APP_REGION"]) && !!region - cmd += "--region #{region}" + cmd += " --region #{region}" end if (internal_port = ENV["DEPLOY_APP_INTERNAL_PORT"]) && !!internal_port - cmd += "--internal-port #{internal_port}" + cmd += " --internal-port #{internal_port}" end exec_capture(cmd) manifest = JSON.parse(File.read("/tmp/manifest.json")) + + vm_cpu_kind = ENV.fetch("DEPLOY_VM_CPU_KIND", manifest["plan"]["vm_cpu_kind"]) + vm_cpus = ENV.fetch("DEPLOY_VM_CPUS", manifest["plan"]["vm_cpus"]) + vm_memory = ENV.fetch("DEPLOY_VM_MEMORY", manifest["plan"]["vm_memory"]) + vm_size = ENV.fetch("DEPLOY_VM_SIZE", manifest["plan"]["vm_size"]) + + # override this to be sure... 
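+ # the DEPLOY_VM_* env vars (falling back to the values in the generated plan)
+ # are written directly into the config's "vm" section so the deploy uses this sizing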
+ manifest["config"]["vm"] = [{ + size: vm_size, + memory: vm_memory, + cpu_kind: vm_cpu_kind, + cpus: vm_cpus.to_i + }] + artifact :manifest, manifest manifest end if ENV["DEPLOY_NOW"] in_step Step::DEPLOY do - vm_cpu_kind = ENV.fetch("DEPLOY_VM_CPU_KIND", manifest["plan"]["vm_cpu_kind"]) - vm_cpus = ENV.fetch("DEPLOY_VM_CPUS", manifest["plan"]["vm_cpus"]) - vm_memory = ENV.fetch("DEPLOY_VM_MEMORY", manifest["plan"]["vm_memory"]) - vm_size = ENV.fetch("DEPLOY_VM_SIZE", manifest["plan"]["vm_size"]) - File.write("/tmp/fly.json", manifest["config"].to_json) - exec_capture("flyctl deploy -a #{APP_NAME} --vm-cpu-kind #{vm_cpu_kind} --vm-cpus #{vm_cpus} --vm-memory #{vm_memory} --vm-size #{vm_size} -c /tmp/fly.json") + exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json") end end From 8726038277e68fdf94e60a69bbcbdc30f70dd9e7 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 26 Jul 2024 08:27:08 -0400 Subject: [PATCH 007/104] add elixir and nodejs --- deployer.Dockerfile | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 3a662e25e0..fb3a63c247 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -1,6 +1,20 @@ FROM debian:bookworm -RUN apt update && apt install -y --no-install-recommends ruby git +RUN apt update && apt install -y --no-install-recommends ruby git curl clang g++ libncurses5 libncurses-dev libncurses5-dev make unzip locales openssl libssl-dev + +# Erlang + Elixir +COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/bin/ /usr/local/bin +COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/elixir/ /usr/local/lib/elixir +COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/erlang/ /usr/local/lib/erlang +# Ensure you have everything compiled so fly launch works +RUN mix local.hex --force && mix local.rebar --force +ENV MIX_ENV=prod + +# Node.js +COPY --from=node:22-bookworm /usr/local/bin/ /usr/local/bin +COPY --from=node:22-bookworm /usr/local/lib/node_modules/ /usr/local/lib/node_modules +COPY --from=node:22-bookworm /opt/yarn-v1.22.22/ /opt/yarn-v1.22.22 +ENV NODE_ENV=production COPY bin/flyctl /usr/local/bin/flyctl COPY deploy.rb /deploy.rb From 108f41a78fd0f2da4516f98816e9dac6d4173bdc Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 29 Jul 2024 14:55:29 -0400 Subject: [PATCH 008/104] adds a build step, a diff artifact and finally generates files on disk during the plan step --- deploy.rb | 24 +++++++++++++++++++---- internal/command/launch/generate.go | 30 ++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/deploy.rb b/deploy.rb index a29e18a3bd..d0995c07a1 100755 --- a/deploy.rb +++ b/deploy.rb @@ -4,6 +4,7 @@ require 'time' require 'open3' require 'uri' +require 'securerandom' LOG_PREFIX = ENV["LOG_PREFIX"] @@ -11,6 +12,7 @@ module Step ROOT = :__root__ GIT_PULL = :git_pull PLAN = :plan + BUILD = :build DEPLOY = :deploy end @@ -130,7 +132,7 @@ def ts if (git_repo = ENV["GIT_REPO"]) && !!git_repo in_step Step::GIT_PULL do - `git config --global init.defaultBranch main` + # `git config --global init.defaultBranch main` ref = ENV["GIT_REF"] artifact :git_info, { repository: git_repo, reference: ref } @@ -189,14 +191,28 @@ def ts }] artifact :manifest, manifest + + exec_capture("git add -A") + diff = exec_capture("git diff --cached") + artifact :diff, diff + manifest end +# Write the fly config file to a tmp 
directory +File.write("/tmp/fly.json", manifest["config"].to_json) + +image_tag = SecureRandom.hex(16) +image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" + +in_step Step::BUILD do + exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") + artifact :docker_image, image_ref +end + if ENV["DEPLOY_NOW"] in_step Step::DEPLOY do - File.write("/tmp/fly.json", manifest["config"].to_json) - - exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json") + exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --image #{image_ref}") end end diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index 102d50b864..c3c52630e6 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -50,7 +50,7 @@ func runGenerate(ctx context.Context) error { ctx = context.WithValue(ctx, genContextKey{}, true) recoverableErrors := recoverableErrorBuilder{canEnterUi: false} - launchManifest, _, err := buildManifest(ctx, &recoverableErrors) + launchManifest, planBuildCache, err := buildManifest(ctx, &recoverableErrors) if err != nil { return err } @@ -61,7 +61,9 @@ func runGenerate(ctx context.Context) error { launchManifest.Config.SetInternalPort(n) } - file, err := os.Create(flag.GetString(ctx, "manifest-path")) + manifestPath := flag.GetString(ctx, "manifest-path") + + file, err := os.Create(manifestPath) if err != nil { return err } @@ -70,7 +72,29 @@ func runGenerate(ctx context.Context) error { jsonEncoder := json.NewEncoder(file) jsonEncoder.SetIndent("", " ") - return jsonEncoder.Encode(launchManifest) + if err := jsonEncoder.Encode(launchManifest); err != nil { + return err + } + + state := &launchState{workingDir: ".", configPath: "fly.json", LaunchManifest: *launchManifest, env: map[string]string{}, planBuildCache: *planBuildCache, cache: map[string]interface{}{}} + + if err := state.satisfyScannerBeforeDb(ctx); err != nil { + return err + } + + if err := state.satisfyScannerBeforeDb(ctx); err != nil { + return err + } + + if err = state.createDockerIgnore(ctx); err != nil { + return err + } + + if err = state.scannerSetAppconfig(ctx); err != nil { + return err + } + + return nil } type genContextKey struct{} From 836015d1618e7bb5a5a2652e4fb6ff0a877deb9f Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Thu, 1 Aug 2024 11:29:47 -0400 Subject: [PATCH 009/104] send metadata about the deploy steps before starting --- deploy.rb | 91 +++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/deploy.rb b/deploy.rb index d0995c07a1..f3dd97a779 100755 --- a/deploy.rb +++ b/deploy.rb @@ -16,6 +16,15 @@ module Step DEPLOY = :deploy end +module Artifact + META = :meta + GIT_INFO = :git_info + GIT_HEAD = :git_head + MANIFEST = :manifest + DIFF = :diff + DOCKER_IMAGE = :docker_image +end + $current_step = Step::ROOT $counter = 0 @@ -116,44 +125,68 @@ def ts Time.now.utc.iso8601(6) end +def get_env(name) + value = ENV[name]&.strip + if value.nil? || value.empty? 
+ return nil + end + value +end + +# start of actual logic + event :start, { ts: ts() } -APP_NAME = ENV["DEPLOY_APP_NAME"] +APP_NAME = get_env("DEPLOY_APP_NAME") if !APP_NAME event :error, { type: :validation, message: "missing app name" } exit 1 end -ORG_SLUG = ENV["DEPLOY_ORG_SLUG"] +ORG_SLUG = get_env("DEPLOY_ORG_SLUG") if !ORG_SLUG event :error, { type: :validation, message: "missing organization slug" } exit 1 end -if (git_repo = ENV["GIT_REPO"]) && !!git_repo +GIT_REPO = get_env("GIT_REPO") + +DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? + +steps = [] + +steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO +steps.push({id: Step::PLAN, description: "Plan deployment"}) +steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO +steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + +artifact Artifact::META, { steps: steps } + +GIT_REPO_URL = if GIT_REPO + repo_url = begin + URI(GIT_REPO) + rescue StandardError => e + event :error, { type: :invalid_git_repo_url, message: e } + exit 1 + end + if (user = get_env("GIT_URL_USER")) + repo_url.user = user.strip + end + if (password = get_env("GIT_URL_PASSWORD")) + repo_url.password = password.strip + end + repo_url +end + +if GIT_REPO_URL in_step Step::GIT_PULL do - # `git config --global init.defaultBranch main` - ref = ENV["GIT_REF"] - artifact :git_info, { repository: git_repo, reference: ref } + `git config --global init.defaultBranch main` # NOTE: this is to avoid a large warning message + ref = get_env("GIT_REF") + artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } exec_capture("git init") - - git_repo_url = begin - URI(git_repo) - rescue StandardError => e - event :error, { type: :invalid_git_repo_url, message: e } - exit 1 - end - - if (user = ENV["GIT_URL_USER"]) && !!user - git_repo_url.user = user - end - - if (password = ENV["GIT_URL_PASSWORD"]) && !!password - git_repo_url.password = password - end - exec_capture("git remote add origin #{git_repo_url.to_s}") + exec_capture("git remote add origin #{GIT_REPO_URL.to_s}") ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'").chomp if !ref @@ -162,16 +195,16 @@ def ts head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'")) - artifact :git_head, head + artifact Artifact::GIT_HEAD, head end end manifest = in_step Step::PLAN do cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" - if (region = ENV["DEPLOY_APP_REGION"]) && !!region + if (region = get_env("DEPLOY_APP_REGION")) cmd += " --region #{region}" end - if (internal_port = ENV["DEPLOY_APP_INTERNAL_PORT"]) && !!internal_port + if (internal_port = get_env("DEPLOY_APP_INTERNAL_PORT")) cmd += " --internal-port #{internal_port}" end exec_capture(cmd) @@ -190,11 +223,11 @@ def ts cpus: vm_cpus.to_i }] - artifact :manifest, manifest + artifact Artifact::MANIFEST, manifest exec_capture("git add -A") diff = exec_capture("git diff --cached") - artifact :diff, diff + artifact Artifact::DIFF, diff manifest end @@ -207,10 +240,10 @@ def ts in_step Step::BUILD do exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") - artifact :docker_image, image_ref + artifact Artifact::DOCKER_IMAGE, image_ref end -if ENV["DEPLOY_NOW"] +if DEPLOY_NOW in_step Step::DEPLOY do exec_capture("flyctl deploy -a #{APP_NAME} -c 
/tmp/fly.json --image #{image_ref}") end From 03d828cfdc6720005911786983417e34ec03b6e2 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 2 Aug 2024 09:18:51 -0400 Subject: [PATCH 010/104] fly pg creation from deployer --- deploy.rb | 68 ++++++++++++--- internal/command/postgres/create.go | 130 ++++++++++++++++------------ 2 files changed, 131 insertions(+), 67 deletions(-) diff --git a/deploy.rb b/deploy.rb index f3dd97a779..856406c49a 100755 --- a/deploy.rb +++ b/deploy.rb @@ -13,6 +13,7 @@ module Step GIT_PULL = :git_pull PLAN = :plan BUILD = :build + FLY_POSTGRES_CREATE = :fly_postgres_create DEPLOY = :deploy end @@ -22,6 +23,7 @@ module Artifact GIT_HEAD = :git_head MANIFEST = :manifest DIFF = :diff + FLY_POSTGRES = :fly_postgres DOCKER_IMAGE = :docker_image end @@ -151,17 +153,6 @@ def get_env(name) GIT_REPO = get_env("GIT_REPO") -DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? - -steps = [] - -steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO -steps.push({id: Step::PLAN, description: "Plan deployment"}) -steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO -steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW - -artifact Artifact::META, { steps: steps } - GIT_REPO_URL = if GIT_REPO repo_url = begin URI(GIT_REPO) @@ -178,6 +169,34 @@ def get_env(name) repo_url end +PG_PROVIDER = get_env("DEPLOY_PG_PROVIDER") +FLY_PG_PROVIDER = PG_PROVIDER == "fly_postgres" + +PG_NAME = get_env("DEPLOY_PG_NAME") +PG_FLY_CONFIG = get_env("DEPLOY_PG_FLY_CONFIG") +PG_REGION = get_env("DEPLOY_PG_REGION") + +if FLY_PG_PROVIDER + if !PG_FLY_CONFIG + event :error, { type: :validation, message: "Missing DEPLOY_PG_FLY_CONFIG" } + exit 1 + end +end + +DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? 
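 # DEPLOY_NOW counts as set for any non-empty value and gates the final deploy step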
+ +steps = [] + +steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO +steps.push({id: Step::PLAN, description: "Plan deployment"}) +steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO +steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG_PROVIDER +steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + +artifact Artifact::META, { steps: steps } + +APP_REGION = get_env("DEPLOY_APP_REGION") + if GIT_REPO_URL in_step Step::GIT_PULL do `git config --global init.defaultBranch main` # NOTE: this is to avoid a large warning message @@ -201,7 +220,7 @@ def get_env(name) manifest = in_step Step::PLAN do cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" - if (region = get_env("DEPLOY_APP_REGION")) + if (region = APP_REGION) cmd += " --region #{region}" end if (internal_port = get_env("DEPLOY_APP_INTERNAL_PORT")) @@ -243,6 +262,31 @@ def get_env(name) artifact Artifact::DOCKER_IMAGE, image_ref end + + +if FLY_PG_PROVIDER + in_step Step::FLY_POSTGRES_CREATE do + cmd = "flyctl pg create --flex --config-name #{PG_FLY_CONFIG} --org #{ORG_SLUG}" + + + pg_name = PG_NAME + pg_name ||= "#{APP_NAME}-db-#{SecureRandom.hex(2)}" + + cmd += " --name #{pg_name}" + + region = PG_REGION + region ||= APP_REGION + + cmd += " --region #{region}" if region + + artifact Artifact::FLY_POSTGRES, { name: pg_name, region: region, config: PG_FLY_CONFIG } + + exec_capture(cmd) + + exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") + end +end + if DEPLOY_NOW in_step Step::DEPLOY do exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --image #{image_ref}") diff --git a/internal/command/postgres/create.go b/internal/command/postgres/create.go index a813eaf1ae..0163060740 100644 --- a/internal/command/postgres/create.go +++ b/internal/command/postgres/create.go @@ -87,6 +87,10 @@ func newCreate() *cobra.Command { Description: "Automatically start a stopped Postgres app when a network request is received", Default: false, }, + flag.String{ + Name: "config-name", + Description: "Configuration name to use for sizing", + }, ) return cmd @@ -280,41 +284,49 @@ func CreateCluster(ctx context.Context, org *fly.Organization, region *fly.Regio ForkFrom: params.ForkFrom, } - customConfig := params.DiskGb != 0 || params.VMSize != "" || params.InitialClusterSize != 0 || params.ScaleToZero != nil - var config *PostgresConfiguration + customConfig := false - if !customConfig { - fmt.Fprintf(io.Out, "For pricing information visit: https://fly.io/docs/about/pricing/#postgresql-clusters") + configName := flag.GetString(ctx, "config-name") + if conf, ok := flexConfigs[configName]; ok { + config = &conf + customConfig = false + } else { - msg := "Select configuration:" - configurations := postgresConfigurations(input.Manager) - var selected int + customConfig = params.DiskGb != 0 || params.VMSize != "" || params.InitialClusterSize != 0 || params.ScaleToZero != nil - options := []string{} - for i, cfg := range configurations { - options = append(options, cfg.Description) - if selected == 0 && !strings.HasPrefix(cfg.Description, "Dev") { - selected = i - } - } + if !customConfig { + fmt.Fprintf(io.Out, "For pricing information visit: https://fly.io/docs/about/pricing/#postgresql-clusters") - if err := prompt.Select(ctx, &selected, msg, configurations[selected].Description, options...); err != nil { - return err - } - config = 
&postgresConfigurations(input.Manager)[selected] + msg := "Select configuration:" + configurations := postgresConfigurations(input.Manager) + var selected int - if input.Manager == flypg.ReplicationManager && config.VMSize == "shared-cpu-1x" { - confirm, err := prompt.Confirm(ctx, "Scale single node pg to zero after one hour?") - if err != nil { + options := []string{} + for i, cfg := range configurations { + options = append(options, cfg.Description) + if selected == 0 && !strings.HasPrefix(cfg.Description, "Dev") { + selected = i + } + } + + if err := prompt.Select(ctx, &selected, msg, configurations[selected].Description, options...); err != nil { return err } - input.ScaleToZero = confirm - } + config = &postgresConfigurations(input.Manager)[selected] + + if input.Manager == flypg.ReplicationManager && config.VMSize == "shared-cpu-1x" { + confirm, err := prompt.Confirm(ctx, "Scale single node pg to zero after one hour?") + if err != nil { + return err + } + input.ScaleToZero = confirm + } - if config.VMSize == "" { - // User has opted into choosing a custom configuration. - customConfig = true + if config.VMSize == "" { + // User has opted into choosing a custom configuration. + customConfig = true + } } } @@ -464,37 +476,45 @@ func stolonConfigurations() []PostgresConfiguration { } } +var flexConfigs = map[string]PostgresConfiguration{ + "dev": { + Description: "Development - Single node, 1x shared CPU, 256MB RAM, 1GB disk", + DiskGb: 1, + InitialClusterSize: 1, + MemoryMb: 256, + VMSize: "shared-cpu-1x", + }, + "prod_sm": { + Description: "Production (High Availability) - 3 nodes, 2x shared CPUs, 4GB RAM, 40GB disk", + DiskGb: 40, + InitialClusterSize: 3, + MemoryMb: 4096, + VMSize: "shared-cpu-2x", + }, + "prod_lg": { + Description: "Production (High Availability) - 3 nodes, 4x shared CPUs, 8GB RAM, 80GB disk", + DiskGb: 80, + InitialClusterSize: 3, + MemoryMb: 8192, + VMSize: "shared-cpu-4x", + }, + "custom": { + Description: "Specify custom configuration", + DiskGb: 0, + InitialClusterSize: 0, + MemoryMb: 0, + VMSize: "", + }, +} + func flexConfigurations() []PostgresConfiguration { - return []PostgresConfiguration{ - { - Description: "Development - Single node, 1x shared CPU, 256MB RAM, 1GB disk", - DiskGb: 1, - InitialClusterSize: 1, - MemoryMb: 256, - VMSize: "shared-cpu-1x", - }, - { - Description: "Production (High Availability) - 3 nodes, 2x shared CPUs, 4GB RAM, 40GB disk", - DiskGb: 40, - InitialClusterSize: 3, - MemoryMb: 4096, - VMSize: "shared-cpu-2x", - }, - { - Description: "Production (High Availability) - 3 nodes, 4x shared CPUs, 8GB RAM, 80GB disk", - DiskGb: 80, - InitialClusterSize: 3, - MemoryMb: 8192, - VMSize: "shared-cpu-4x", - }, - { - Description: "Specify custom configuration", - DiskGb: 0, - InitialClusterSize: 0, - MemoryMb: 0, - VMSize: "", - }, + var configs = []PostgresConfiguration{} + + for _, conf := range flexConfigs { + configs = append(configs, conf) } + + return configs } // machineVMSizes represents the available VM configurations for Machines. From 0897f2de40ef94f93189390c50778dc93959624b Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Sun, 4 Aug 2024 11:52:36 -0400 Subject: [PATCH 011/104] trying to allow multiple ruby versions, but failing. 
wip commit --- deploy.rb | 19 +++++++++++++++---- deployer.Dockerfile | 2 +- internal/command/launch/generate.go | 5 +++++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/deploy.rb b/deploy.rb index 856406c49a..38c5abd9be 100755 --- a/deploy.rb +++ b/deploy.rb @@ -5,6 +5,7 @@ require 'open3' require 'uri' require 'securerandom' +require 'fileutils' LOG_PREFIX = ENV["LOG_PREFIX"] @@ -220,12 +221,17 @@ def get_env(name) manifest = in_step Step::PLAN do cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" + if (region = APP_REGION) cmd += " --region #{region}" end + if (internal_port = get_env("DEPLOY_APP_INTERNAL_PORT")) cmd += " --internal-port #{internal_port}" end + + cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + exec_capture(cmd) manifest = JSON.parse(File.read("/tmp/manifest.json")) @@ -255,15 +261,20 @@ def get_env(name) File.write("/tmp/fly.json", manifest["config"].to_json) image_tag = SecureRandom.hex(16) -image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" -in_step Step::BUILD do +image_ref = in_step Step::BUILD do + if (image_ref = manifest.dig("config","build","image")&.strip) && !image_ref.nil? && !image_ref.empty? + info("Skipping build, using image defined in fly config: #{image_ref}") + return image_ref + end + + image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" + exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") artifact Artifact::DOCKER_IMAGE, image_ref + image_ref end - - if FLY_PG_PROVIDER in_step Step::FLY_POSTGRES_CREATE do cmd = "flyctl pg create --flex --config-name #{PG_FLY_CONFIG} --org #{ORG_SLUG}" diff --git a/deployer.Dockerfile b/deployer.Dockerfile index fb3a63c247..afc1e25e04 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm -RUN apt update && apt install -y --no-install-recommends ruby git curl clang g++ libncurses5 libncurses-dev libncurses5-dev make unzip locales openssl libssl-dev +RUN apt update && apt install -y --no-install-recommends ruby ruby-bundler git curl clang g++ libncurses5 libncurses-dev libncurses5-dev make unzip locales openssl libssl-dev # Erlang + Elixir COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/bin/ /usr/local/bin diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index c3c52630e6..06e498d219 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -38,6 +38,11 @@ func newGenerate() *cobra.Command { Description: "Path to write the manifest to", Default: "manifest.json", }, + flag.Bool{ + Name: "copy-config", + Description: "Use the configuration file if present without prompting", + Default: false, + }, ) // not that useful anywhere else yet From 9a4c2688566f8c6b0f7542bd17f1eab40745bcca Mon Sep 17 00:00:00 2001 From: lubien Date: Tue, 6 Aug 2024 09:45:57 -0300 Subject: [PATCH 012/104] Use dev envs for elixir and nodejs --- deployer.Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index afc1e25e04..1b1c835248 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -8,13 +8,13 @@ COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/erlang/ /usr/local/lib/erlang # Ensure you have everything compiled so fly launch works RUN mix local.hex --force && 
mix local.rebar --force -ENV MIX_ENV=prod +ENV MIX_ENV=dev # Node.js COPY --from=node:22-bookworm /usr/local/bin/ /usr/local/bin COPY --from=node:22-bookworm /usr/local/lib/node_modules/ /usr/local/lib/node_modules COPY --from=node:22-bookworm /opt/yarn-v1.22.22/ /opt/yarn-v1.22.22 -ENV NODE_ENV=production +ENV NODE_ENV=development COPY bin/flyctl /usr/local/bin/flyctl COPY deploy.rb /deploy.rb From c01b27504f78894f1303f93dd052e3e79e84d544 Mon Sep 17 00:00:00 2001 From: lubien Date: Tue, 6 Aug 2024 09:46:33 -0300 Subject: [PATCH 013/104] Fix support for [build.image] --- deploy.rb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/deploy.rb b/deploy.rb index 38c5abd9be..e2f73336a8 100755 --- a/deploy.rb +++ b/deploy.rb @@ -265,14 +265,14 @@ def get_env(name) image_ref = in_step Step::BUILD do if (image_ref = manifest.dig("config","build","image")&.strip) && !image_ref.nil? && !image_ref.empty? info("Skipping build, using image defined in fly config: #{image_ref}") - return image_ref - end - - image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" + image_ref + else + image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") - artifact Artifact::DOCKER_IMAGE, image_ref - image_ref + exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") + artifact Artifact::DOCKER_IMAGE, image_ref + image_ref + end end if FLY_PG_PROVIDER From 449886ef5d7014c2adba966897ae6f62773e346e Mon Sep 17 00:00:00 2001 From: lubien Date: Tue, 6 Aug 2024 10:43:06 -0300 Subject: [PATCH 014/104] Fix typo --- internal/command/launch/generate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index 06e498d219..d44496c7d6 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -87,7 +87,7 @@ func runGenerate(ctx context.Context) error { return err } - if err := state.satisfyScannerBeforeDb(ctx); err != nil { + if err = state.satisfyScannerAfterDb(ctx); err != nil { return err } From fc9b7ff16e46f233d019aabe48475810e75cee2b Mon Sep 17 00:00:00 2001 From: lubien Date: Wed, 7 Aug 2024 09:21:41 -0300 Subject: [PATCH 015/104] Maybe fix secrets setting --- deploy.rb | 2 +- internal/command/launch/generate.go | 11 +++++++++++ internal/command/launch/plan_builder.go | 12 ++++++------ 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/deploy.rb b/deploy.rb index e2f73336a8..593c4fe38f 100755 --- a/deploy.rb +++ b/deploy.rb @@ -220,7 +220,7 @@ def get_env(name) end manifest = in_step Step::PLAN do - cmd = "flyctl launch generate -a #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" + cmd = "flyctl launch generate --force-name -a #{APP_NAME} --name #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" if (region = APP_REGION) cmd += " --region #{region}" diff --git a/internal/command/launch/generate.go b/internal/command/launch/generate.go index d44496c7d6..d96f736eb2 100644 --- a/internal/command/launch/generate.go +++ b/internal/command/launch/generate.go @@ -22,6 +22,17 @@ func newGenerate() *cobra.Command { flag.Region(), flag.Org(), flag.AppConfig(), + flag.String{ + Name: "name", + Description: `Name of the new app`, + }, + // don't try to generate a name + flag.Bool{ + Name: "force-name", + Description: "Force app name supplied by --name", + Default: false, + Hidden: true, + }, 
flag.Int{ Name: "internal-port", Description: "Set internal_port for all services in the generated fly.toml", diff --git a/internal/command/launch/plan_builder.go b/internal/command/launch/plan_builder.go index 2f19333488..7f1e45b95a 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -153,12 +153,6 @@ func buildManifest(ctx context.Context, recoverableErrors *recoverableErrorBuild } configPath := filepath.Join(workingDir, appconfig.DefaultConfigFileName) - var srcInfo *scanner.SourceInfo - srcInfo, appConfig.Build, err = determineSourceInfo(ctx, appConfig, copiedConfig, workingDir) - if err != nil { - return nil, nil, err - } - appName, appNameExplanation, err := determineAppName(ctx, appConfig, configPath) if err != nil { if err := recoverableErrors.tryRecover(err); err != nil { @@ -166,6 +160,12 @@ func buildManifest(ctx context.Context, recoverableErrors *recoverableErrorBuild } } + var srcInfo *scanner.SourceInfo + srcInfo, appConfig.Build, err = determineSourceInfo(ctx, appConfig, copiedConfig, workingDir) + if err != nil { + return nil, nil, err + } + compute, computeExplanation, err := determineCompute(ctx, appConfig, srcInfo) if err != nil { if err := recoverableErrors.tryRecover(err); err != nil { From a98b4ab63a5429bd6e4fbc0f2f3baf9f4c2c787e Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 12 Aug 2024 14:52:06 -0400 Subject: [PATCH 016/104] add launch sessions subcommands, adjust deploy script to use them --- deploy.rb | 90 +++------ internal/command/launch/cmd.go | 1 + internal/command/launch/sessions.go | 273 ++++++++++++++++++++++++++++ 3 files changed, 299 insertions(+), 65 deletions(-) create mode 100644 internal/command/launch/sessions.go diff --git a/deploy.rb b/deploy.rb index 593c4fe38f..65e90798e9 100755 --- a/deploy.rb +++ b/deploy.rb @@ -13,6 +13,7 @@ module Step ROOT = :__root__ GIT_PULL = :git_pull PLAN = :plan + CUSTOMIZE = :customize BUILD = :build FLY_POSTGRES_CREATE = :fly_postgres_create DEPLOY = :deploy @@ -23,6 +24,7 @@ module Artifact GIT_INFO = :git_info GIT_HEAD = :git_head MANIFEST = :manifest + SESSION = :session DIFF = :diff FLY_POSTGRES = :fly_postgres DOCKER_IMAGE = :docker_image @@ -140,18 +142,6 @@ def get_env(name) event :start, { ts: ts() } -APP_NAME = get_env("DEPLOY_APP_NAME") -if !APP_NAME - event :error, { type: :validation, message: "missing app name" } - exit 1 -end - -ORG_SLUG = get_env("DEPLOY_ORG_SLUG") -if !ORG_SLUG - event :error, { type: :validation, message: "missing organization slug" } - exit 1 -end - GIT_REPO = get_env("GIT_REPO") GIT_REPO_URL = if GIT_REPO @@ -170,28 +160,14 @@ def get_env(name) repo_url end -PG_PROVIDER = get_env("DEPLOY_PG_PROVIDER") -FLY_PG_PROVIDER = PG_PROVIDER == "fly_postgres" - -PG_NAME = get_env("DEPLOY_PG_NAME") -PG_FLY_CONFIG = get_env("DEPLOY_PG_FLY_CONFIG") -PG_REGION = get_env("DEPLOY_PG_REGION") - -if FLY_PG_PROVIDER - if !PG_FLY_CONFIG - event :error, { type: :validation, message: "Missing DEPLOY_PG_FLY_CONFIG" } - exit 1 - end -end - DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? 
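 # the full list of planned steps is published as a :meta artifact before any step runs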
steps = [] steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO -steps.push({id: Step::PLAN, description: "Plan deployment"}) +steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) +steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO -steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG_PROVIDER steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW artifact Artifact::META, { steps: steps } @@ -219,8 +195,8 @@ def get_env(name) end end -manifest = in_step Step::PLAN do - cmd = "flyctl launch generate --force-name -a #{APP_NAME} --name #{APP_NAME} -o #{ORG_SLUG} --manifest-path /tmp/manifest.json" +session = in_step Step::PLAN do + cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json" if (region = APP_REGION) cmd += " --region #{region}" @@ -233,33 +209,35 @@ def get_env(name) cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") exec_capture(cmd) + session = JSON.parse(File.read("/tmp/session.json")) manifest = JSON.parse(File.read("/tmp/manifest.json")) - vm_cpu_kind = ENV.fetch("DEPLOY_VM_CPU_KIND", manifest["plan"]["vm_cpu_kind"]) - vm_cpus = ENV.fetch("DEPLOY_VM_CPUS", manifest["plan"]["vm_cpus"]) - vm_memory = ENV.fetch("DEPLOY_VM_MEMORY", manifest["plan"]["vm_memory"]) - vm_size = ENV.fetch("DEPLOY_VM_SIZE", manifest["plan"]["vm_size"]) - - # override this to be sure... - manifest["config"]["vm"] = [{ - size: vm_size, - memory: vm_memory, - cpu_kind: vm_cpu_kind, - cpus: vm_cpus.to_i - }] - artifact Artifact::MANIFEST, manifest + artifact Artifact::SESSION, session exec_capture("git add -A") diff = exec_capture("git diff --cached") artifact Artifact::DIFF, diff + session +end + +manifest = in_step Step::CUSTOMIZE do + cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path /tmp/manifest.json" + + exec_capture(cmd) + manifest = JSON.parse(File.read("/tmp/manifest.json")) + + artifact Artifact::MANIFEST, manifest + manifest end # Write the fly config file to a tmp directory File.write("/tmp/fly.json", manifest["config"].to_json) +APP_NAME = manifest["config"]["app"] + image_tag = SecureRandom.hex(16) image_ref = in_step Step::BUILD do @@ -275,28 +253,10 @@ def get_env(name) end end -if FLY_PG_PROVIDER - in_step Step::FLY_POSTGRES_CREATE do - cmd = "flyctl pg create --flex --config-name #{PG_FLY_CONFIG} --org #{ORG_SLUG}" - - - pg_name = PG_NAME - pg_name ||= "#{APP_NAME}-db-#{SecureRandom.hex(2)}" - - cmd += " --name #{pg_name}" - - region = PG_REGION - region ||= APP_REGION - - cmd += " --region #{region}" if region - - artifact Artifact::FLY_POSTGRES, { name: pg_name, region: region, config: PG_FLY_CONFIG } - - exec_capture(cmd) - - exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") - end -end +# TODO: Setup Postgres if defined +# TODO: Setup Upstash if defined +# TODO: Setup Tigris if defined +# TODO: Setup Sentry if defined if DEPLOY_NOW in_step Step::DEPLOY do diff --git a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index 92b5f909a7..b745a3a76d 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -137,6 +137,7 @@ func New() (cmd *cobra.Command) { ) cmd.AddCommand(newGenerate()) + cmd.AddCommand(newSessions()) return } diff --git a/internal/command/launch/sessions.go b/internal/command/launch/sessions.go 
new file mode 100644 index 0000000000..3ed5a4a48f --- /dev/null +++ b/internal/command/launch/sessions.go @@ -0,0 +1,273 @@ +package launch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "time" + + "github.com/logrusorgru/aurora" + "github.com/spf13/cobra" + fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/logger" + state2 "github.com/superfly/flyctl/internal/state" + "github.com/superfly/flyctl/iostreams" +) + +func newSessions() *cobra.Command { + sessDesc := "manage launch sessions" + cmd := command.New("sessions", sessDesc, sessDesc, nil) + // not that useful anywhere else yet + cmd.Hidden = true + + createDesc := "create a new launch session" + createCmd := command.New("create", createDesc, createDesc, runSessionCreate, command.LoadAppConfigIfPresent) + + flag.Add(createCmd, + flag.App(), + flag.Region(), + flag.Org(), + flag.AppConfig(), + flag.String{ + Name: "name", + Description: `Name of the new app`, + }, + // don't try to generate a name + flag.Bool{ + Name: "force-name", + Description: "Force app name supplied by --name", + Default: false, + Hidden: true, + }, + flag.Int{ + Name: "internal-port", + Description: "Set internal_port for all services in the generated fly.toml", + Default: -1, + }, + flag.Bool{ + Name: "ha", + Description: "Create spare machines that increases app availability", + Default: false, + }, + flag.String{ + Name: "session-path", + Description: "Path to write the session info to", + Default: "session.json", + }, + flag.String{ + Name: "manifest-path", + Description: "Path to write the manifest info to", + Default: "manifest.json", + }, + flag.Bool{ + Name: "copy-config", + Description: "Use the configuration file if present without prompting", + Default: false, + }, + ) + + // not that useful anywhere else yet + createCmd.Hidden = true + + finalizeDesc := "finalize a launch session" + finalizeCmd := command.New("finalize", finalizeDesc, finalizeDesc, runSessionFinalize, command.LoadAppConfigIfPresent) + + flag.Add(finalizeCmd, + flag.App(), + flag.Region(), + flag.Org(), + flag.AppConfig(), + flag.String{ + Name: "session-path", + Description: "Path to write the session info to", + Default: "session.json", + }, + flag.String{ + Name: "manifest-path", + Description: "Path to write the manifest info to", + Default: "manifest.json", + }, + ) + + // not that useful anywhere else yet + finalizeCmd.Hidden = true + + cmd.AddCommand(createCmd, finalizeCmd) + + return cmd +} + +func runSessionCreate(ctx context.Context) (err error) { + io := iostreams.FromContext(ctx) + + recoverableErrors := recoverableErrorBuilder{canEnterUi: false} + launchManifest, planBuildCache, err := buildManifest(ctx, nil, &recoverableErrors) + if err != nil { + return err + } + + updateConfig(launchManifest.Plan, nil, launchManifest.Config) + if n := flag.GetInt(ctx, "internal-port"); n > 0 { + launchManifest.Config.SetInternalPort(n) + } + + manifestPath := flag.GetString(ctx, "manifest-path") + + file, err := os.Create(manifestPath) + if err != nil { + return err + } + defer file.Close() + + jsonEncoder := json.NewEncoder(file) + jsonEncoder.SetIndent("", " ") + + if err := jsonEncoder.Encode(launchManifest); err != nil { + return err + } + + file.Close() + + state := &launchState{ + workingDir: ".", + configPath: "fly.json", + LaunchManifest: *launchManifest, + env: map[string]string{}, + planBuildCache: 
*planBuildCache, + cache: map[string]interface{}{}, + } + + if errors := recoverableErrors.build(); errors != "" { + fmt.Fprintf(io.ErrOut, "\n%s\n%s\n", aurora.Reverse(aurora.Red("Problems encountered that can fixed from user interaction:")), errors) + } + + session, err := fly.StartCLISession(fmt.Sprintf("%s: %s", state2.Hostname(ctx), state.Plan.AppName), map[string]any{ + "target": "launch", + "metadata": state.Plan, + }) + if err != nil { + return err + } + + sessionPath := flag.GetString(ctx, "session-path") + + file, err = os.Create(sessionPath) + if err != nil { + return err + } + defer file.Close() + + jsonEncoder = json.NewEncoder(file) + jsonEncoder.SetIndent("", " ") + + if err := jsonEncoder.Encode(session); err != nil { + return err + } + + return nil +} + +func runSessionFinalize(ctx context.Context) (err error) { + io := iostreams.FromContext(ctx) + logger := logger.FromContext(ctx) + + sessionBytes, err := os.ReadFile(flag.GetString(ctx, "session-path")) + if err != nil { + return err + } + + var session fly.CLISession + if err := json.Unmarshal(sessionBytes, &session); err != nil { + return err + } + + manifestBytes, err := os.ReadFile(flag.GetString(ctx, "manifest-path")) + if err != nil { + return err + } + + var launchManifest LaunchManifest + if err := json.Unmarshal(manifestBytes, &launchManifest); err != nil { + return err + } + + planBuildCache := planBuildCache{ + appConfig: launchManifest.Config, + sourceInfo: nil, + appNameValidated: true, + warnedNoCcHa: true, + } + + // FIXME: better timeout here + ctx, cancel := context.WithTimeout(ctx, 15*time.Minute) + defer cancel() + + finalSession, err := waitForCLISession(ctx, logger, io.ErrOut, session.ID) + switch { + case errors.Is(err, context.DeadlineExceeded): + return errors.New("session expired, please try again") + case err != nil: + return err + } + + // Hack because somewhere from between UI and here, the numbers get converted to strings + if err := patchNumbers(finalSession.Metadata, "vm_cpus", "vm_memory"); err != nil { + return err + } + + // Wasteful, but gets the job done without uprooting the session types. + // Just round-trip the map[string]interface{} back into json, so we can re-deserialize it into a complete type. + metaJson, err := json.Marshal(finalSession.Metadata) + if err != nil { + return err + } + + state := &launchState{ + workingDir: ".", + configPath: "fly.json", + LaunchManifest: launchManifest, + env: map[string]string{}, + planBuildCache: planBuildCache, + cache: map[string]interface{}{}, + } + + oldPlan := helpers.Clone(state.Plan) + + err = json.Unmarshal(metaJson, &state.Plan) + if err != nil { + return err + } + + // Patch in some fields that we keep in the plan that aren't persisted by the UI. + // Technically, we should probably just be persisting this, but there's + // no clear value to the UI having these fields currently. + if _, ok := finalSession.Metadata["ha"]; !ok { + state.Plan.HighAvailability = oldPlan.HighAvailability + } + // This should never be changed by the UI!! 
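+	// (the scanner family comes from scanning the local source checkout, which the UI cannot redo)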
+ state.Plan.ScannerFamily = oldPlan.ScannerFamily + + manifestPath := flag.GetString(ctx, "manifest-path") + + file, err := os.Create(manifestPath) + if err != nil { + return err + } + defer file.Close() + + jsonEncoder := json.NewEncoder(file) + jsonEncoder.SetIndent("", " ") + + if err := jsonEncoder.Encode(state.LaunchManifest); err != nil { + return err + } + + file.Close() + + return nil +} From 07d5b9c9049e9691685e56e47c3a9f3ba283cf55 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 13 Aug 2024 10:36:31 -0400 Subject: [PATCH 017/104] create all extensions from a launch manifest --- deploy.rb | 139 ++++++++++++++++++++++++++-- internal/command/postgres/create.go | 5 + 2 files changed, 134 insertions(+), 10 deletions(-) diff --git a/deploy.rb b/deploy.rb index 65e90798e9..f1cedcb171 100755 --- a/deploy.rb +++ b/deploy.rb @@ -16,6 +16,10 @@ module Step CUSTOMIZE = :customize BUILD = :build FLY_POSTGRES_CREATE = :fly_postgres_create + SUPABASE_POSTGRES = :supabase_postgres + UPSTASH_REDIS = :upstash_redis + TIGRIS_OBJECT_STORAGE = :tigris_object_storage + SENTRY = :sentry DEPLOY = :deploy end @@ -27,6 +31,10 @@ module Artifact SESSION = :session DIFF = :diff FLY_POSTGRES = :fly_postgres + SUPABASE_POSTGRES = :supabase_postgres + UPSTASH_REDIS = :upstash_redis + TIGRIS_OBJECT_STORAGE = :tigris_object_storage + SENTRY = :sentry DOCKER_IMAGE = :docker_image end @@ -78,8 +86,8 @@ def error(msg) log("error", msg) end -def exec_capture(cmd) - event :exec, { cmd: cmd } +def exec_capture(cmd, display = nil) + event :exec, { cmd: display || cmd } out_mutex = Mutex.new output = "" @@ -142,6 +150,12 @@ def get_env(name) event :start, { ts: ts() } +ORG_SLUG = get_env("DEPLOY_ORG_SLUG") +if !ORG_SLUG + event :error, { type: :validation, message: "missing organization slug" } + exit 1 +end + GIT_REPO = get_env("GIT_REPO") GIT_REPO_URL = if GIT_REPO @@ -168,9 +182,6 @@ def get_env(name) steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO -steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW - -artifact Artifact::META, { steps: steps } APP_REGION = get_env("DEPLOY_APP_REGION") @@ -182,7 +193,11 @@ def get_env(name) exec_capture("git init") - exec_capture("git remote add origin #{GIT_REPO_URL.to_s}") + redacted_repo_url = GIT_REPO_URL.dup + redacted_repo_url.user = nil + redacted_repo_url.password = nil + + exec_capture("git remote add origin #{GIT_REPO_URL.to_s}", "git remote add origin #{redacted_repo_url.to_s}") ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'").chomp if !ref @@ -238,6 +253,22 @@ def get_env(name) APP_NAME = manifest["config"]["app"] +FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") +SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") +UPSTASH = manifest.dig("plan", "redis", "upstash_redis") +TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") +SENTRY = manifest.dig("plan", "sentry") == true + +steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG +steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE +steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH +steps.push({id: Step::TIGRIS_OBJECT_STORAGE, description: "Create Tigris object storage bucket"}) if TIGRIS 
+steps.push({id: Step::SENTRY, description: "Create Sentry project"}) if SENTRY + +steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + +artifact Artifact::META, { steps: steps } + image_tag = SecureRandom.hex(16) image_ref = in_step Step::BUILD do @@ -253,10 +284,98 @@ def get_env(name) end end -# TODO: Setup Postgres if defined -# TODO: Setup Upstash if defined -# TODO: Setup Tigris if defined -# TODO: Setup Sentry if defined +if FLY_PG + in_step Step::FLY_POSTGRES_CREATE do + pg_name = FLY_PG["app_name"] + region = APP_REGION + + cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region} --yes" + + if (vm_size = FLY_PG["vm_size"]) + cmd += " --vm-size #{vm_size}" + end + + if (vm_memory = FLY_PG["vm_ram"]) + cmd += " --vm-memory #{vm_memory}" + end + + if (nodes = FLY_PG["nodes"]) + cmd += " --initial-cluster-size #{nodes}" + end + + if (disk_size_gb = FLY_PG["disk_size_gb"]) + cmd += " --volume-size #{disk_size_gb}" + end + + artifact Artifact::FLY_POSTGRES, { name: pg_name, region: region, config: FLY_PG } + + exec_capture(cmd) + + exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") + end +elsif SUPABASE + in_step Step::SUPABASE_POSTGRES do + cmd = "flyctl ext supabase create --org #{ORG_SLUG} --name #{SUPABASE["db_name"]} --region #{SUPABASE["region"]} --app #{APP_NAME} --yes" + + artifact Artifact::SUPABASE_POSTGRES, { config: SUPABASE } + + exec_capture(cmd) + end +end + +if UPSTASH + in_step Step::UPSTASH_REDIS do + db_name = "#{APP_NAME}-redis" + + cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION} --yes" + + if UPSTASH["eviction"] == true + cmd += " --enable-eviction" + elsif UPSTASH["eviction"] == false + cmd += " --disable-eviction" + end + + if (regions = UPSTASH["regions"]) + cmd += " --replica-regions #{regions.join(",")}" + end + + artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: APP_REGION, name: db_name } + + exec_capture(cmd) + end +end + +if TIGRIS + in_step Step::TIGRIS_OBJECT_STORAGE do + cmd = "flyctl ext tigris create --org #{ORG_SLUG} --app #{APP_NAME} --yes" + + if (name = TIGRIS["name"]) && !name.empty? + cmd += " --name #{name}" + end + + if (pub = TIGRIS["public"]) && pub == true + cmd += " --public" + end + + if (accel = TIGRIS["accelerate"]) && accel == true + cmd += " --accelerate" + end + + if (domain = TIGRIS["website_domain_name"]) && !domain.empty? 
+ cmd += " --website-domain-name #{domain}" + end + + artifact Artifact::TIGRIS_OBJECT_STORAGE, { config: TIGRIS } + + exec_capture(cmd) + end +end + +if SENTRY + in_step Step::SENTRY do + exec_capture("flyctl ext sentry create --app #{APP_NAME} --yes") + end +end if DEPLOY_NOW in_step Step::DEPLOY do diff --git a/internal/command/postgres/create.go b/internal/command/postgres/create.go index 82eb191199..b8f530a4e0 100644 --- a/internal/command/postgres/create.go +++ b/internal/command/postgres/create.go @@ -52,6 +52,10 @@ func newCreate() *cobra.Command { Name: "vm-size", Description: "the size of the VM", }, + flag.Int{ + Name: "vm-memory", + Description: "the memory of the VM in MB", + }, flag.Int{ Name: "initial-cluster-size", Description: "Initial cluster size", @@ -139,6 +143,7 @@ func run(ctx context.Context) (err error) { InitialClusterSize: flag.GetInt(ctx, "initial-cluster-size"), ImageRef: flag.GetString(ctx, "image-ref"), DiskGb: flag.GetInt(ctx, "volume-size"), + MemoryMb: flag.GetInt(ctx, "vm-memory"), } forkFrom := flag.GetString(ctx, "fork-from") From 8087cac2ec09d3c3b98799072b7f15d90910237d Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 14 Aug 2024 08:20:34 -0400 Subject: [PATCH 018/104] update fly config after tweak form resolves, use org slug from final plan --- deploy.rb | 11 +---------- internal/command/launch/sessions.go | 2 ++ 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/deploy.rb b/deploy.rb index f1cedcb171..3a91b965b3 100755 --- a/deploy.rb +++ b/deploy.rb @@ -150,12 +150,6 @@ def get_env(name) event :start, { ts: ts() } -ORG_SLUG = get_env("DEPLOY_ORG_SLUG") -if !ORG_SLUG - event :error, { type: :validation, message: "missing organization slug" } - exit 1 -end - GIT_REPO = get_env("GIT_REPO") GIT_REPO_URL = if GIT_REPO @@ -216,10 +210,6 @@ def get_env(name) if (region = APP_REGION) cmd += " --region #{region}" end - - if (internal_port = get_env("DEPLOY_APP_INTERNAL_PORT")) - cmd += " --internal-port #{internal_port}" - end cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") @@ -252,6 +242,7 @@ def get_env(name) File.write("/tmp/fly.json", manifest["config"].to_json) APP_NAME = manifest["config"]["app"] +ORG_SLUG = manifest["plan"]["org"] FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") diff --git a/internal/command/launch/sessions.go b/internal/command/launch/sessions.go index 3ed5a4a48f..c99be61436 100644 --- a/internal/command/launch/sessions.go +++ b/internal/command/launch/sessions.go @@ -252,6 +252,8 @@ func runSessionFinalize(ctx context.Context) (err error) { // This should never be changed by the UI!! 
state.Plan.ScannerFamily = oldPlan.ScannerFamily + updateConfig(state.Plan, nil, state.Config) + manifestPath := flag.GetString(ctx, "manifest-path") file, err := os.Create(manifestPath) From fa7c15422334bd4df0ad4b6858067cff79816b79 Mon Sep 17 00:00:00 2001 From: Sam Ruby Date: Wed, 14 Aug 2024 10:52:26 -0400 Subject: [PATCH 019/104] add runtime to plan for rails, node, and bun --- internal/command/launch/plan/plan.go | 7 +++++++ internal/command/launch/plan_builder.go | 1 + scanner/jsFramework.go | 4 ++++ scanner/rails.go | 13 +++++++++++++ scanner/scanner.go | 1 + 5 files changed, 26 insertions(+) diff --git a/internal/command/launch/plan/plan.go b/internal/command/launch/plan/plan.go index 74dbb46d21..bdb8519f26 100644 --- a/internal/command/launch/plan/plan.go +++ b/internal/command/launch/plan/plan.go @@ -37,6 +37,13 @@ type LaunchPlan struct { ScannerFamily string `json:"scanner_family"` FlyctlVersion version.Version `json:"flyctl_version"` + + Runtime RuntimeStruct `json:"runtime"` +} + +type RuntimeStruct struct { + Language string `json:"language"` + Version string `json:"version"` } // Guest returns the guest described by the *raw* guest fields in a Plan. diff --git a/internal/command/launch/plan_builder.go b/internal/command/launch/plan_builder.go index 1c8dc47327..767f555699 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -247,6 +247,7 @@ func buildManifest(ctx context.Context, parentConfig *appconfig.Config, recovera lp.HttpServicePort = srcInfo.Port lp.HttpServicePortSetByScanner = true } + lp.Runtime = srcInfo.Runtime } return &LaunchManifest{ diff --git a/scanner/jsFramework.go b/scanner/jsFramework.go index 6696e65a2a..18dc88860a 100644 --- a/scanner/jsFramework.go +++ b/scanner/jsFramework.go @@ -109,6 +109,8 @@ func configureJsFramework(sourceDir string, config *ScannerConfig) (*SourceInfo, if err != nil || nodeVersion.LT(minVersion) { return nil, nil } + + srcInfo.Runtime = plan.RuntimeStruct{Language: "node", Version: nodeVersionString} } } else { // ensure bun is in $PATH @@ -140,6 +142,8 @@ func configureJsFramework(sourceDir string, config *ScannerConfig) (*SourceInfo, if err != nil || bunVersion.LT(minVersion) { return nil, nil } + + srcInfo.Runtime = plan.RuntimeStruct{Language: "bun", Version: bunVersionString} } // set family diff --git a/scanner/rails.go b/scanner/rails.go index a57d66ba67..ed05d30406 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -82,6 +82,19 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error AutoInstrumentErrors: true, } + // add ruby version + versionOutput, err := exec.Command("ruby", "--version").Output() + if err == nil { + // Define the regex pattern to extract the version number + re := regexp.MustCompile(`ruby (\d+\.\d+\.\d+)`) + + // Find the version number in the output + matches := re.FindStringSubmatch(string(versionOutput)) + if len(matches) >= 2 { + s.Runtime = plan.RuntimeStruct{Language: "ruby", Version: matches[1]} + } + } + if checksPass(sourceDir, dirContains("Gemfile", "litestack")) { // don't prompt for pg, redis if litestack is in the Gemfile s.DatabaseDesired = DatabaseKindSqlite diff --git a/scanner/scanner.go b/scanner/scanner.go index 3d7aa6b3db..cc91e631cd 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -83,6 +83,7 @@ type SourceInfo struct { MergeConfig *MergeConfigStruct AutoInstrumentErrors bool FailureCallback func(err error) error + Runtime plan.RuntimeStruct } type SourceFile struct { From 
3153d0b5d6577a129c409186de0c95f407e2b02e Mon Sep 17 00:00:00 2001 From: Sam Ruby Date: Wed, 14 Aug 2024 12:24:03 -0400 Subject: [PATCH 020/104] extract ruby version from .ruby-version or Gemfile Note: no longer abort processing if bundle install fails --- scanner/rails.go | 48 ++++++++++++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/scanner/rails.go b/scanner/rails.go index ed05d30406..17eb3dbe3d 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -57,7 +57,7 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error } } - // verify that the bundle will install before proceeding + // attempt to install bundle before proceeding args := []string{"install"} if checksPass(sourceDir, fileExists("Gemfile.lock")) { @@ -69,10 +69,6 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, errors.Wrap(err, "Failed to install bundle, exiting") - } - s := &SourceInfo{ Family: "Rails", Callback: RailsCallback, @@ -83,18 +79,46 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error } // add ruby version - versionOutput, err := exec.Command("ruby", "--version").Output() - if err == nil { - // Define the regex pattern to extract the version number - re := regexp.MustCompile(`ruby (\d+\.\d+\.\d+)`) - // Find the version number in the output - matches := re.FindStringSubmatch(string(versionOutput)) + var rubyVersion string + + // add ruby version from .ruby-version file + versionFile, err := os.ReadFile(".ruby-version") + if err == nil { + re := regexp.MustCompile(`ruby-(\d+\.\d+\.\d+)`) + matches := re.FindStringSubmatch(string(versionFile)) if len(matches) >= 2 { - s.Runtime = plan.RuntimeStruct{Language: "ruby", Version: matches[1]} + rubyVersion = matches[1] } } + if rubyVersion == "" { + // add ruby version from Gemfile + gemfile, err := os.ReadFile("Gemfile") + if err == nil { + re := regexp.MustCompile(`(?m)^ruby\s+["'](\d+\.\d+\.\d+)["']`) + matches := re.FindStringSubmatch(string(gemfile)) + if len(matches) >= 2 { + rubyVersion = matches[1] + } + } + } + + if rubyVersion == "" { + versionOutput, err := exec.Command("ruby", "--version").Output() + if err == nil { + re := regexp.MustCompile(`ruby (\d+\.\d+\.\d+)`) + matches := re.FindStringSubmatch(string(versionOutput)) + if len(matches) >= 2 { + rubyVersion = matches[1] + } + } + } + + if rubyVersion != "" { + s.Runtime = plan.RuntimeStruct{Language: "ruby", Version: rubyVersion} + } + if checksPass(sourceDir, dirContains("Gemfile", "litestack")) { // don't prompt for pg, redis if litestack is in the Gemfile s.DatabaseDesired = DatabaseKindSqlite From d69cec829bb476ede631a7206b7d3a09d777c629 Mon Sep 17 00:00:00 2001 From: Sam Ruby Date: Wed, 14 Aug 2024 14:55:03 -0400 Subject: [PATCH 021/104] defer the running of bundle install in the rails scanner to the callback --- scanner/rails.go | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/scanner/rails.go b/scanner/rails.go index 17eb3dbe3d..f5da94ee60 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -57,18 +57,6 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error } } - // attempt to install bundle before proceeding - args := []string{"install"} - - if checksPass(sourceDir, fileExists("Gemfile.lock")) { - args = append(args, "--quiet") - } - - cmd := exec.Command(bundle, args...) 
- cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - s := &SourceInfo{ Family: "Rails", Callback: RailsCallback, @@ -332,17 +320,7 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f if pendingError != nil { pendingError = errors.Wrap(pendingError, "Failed to add dockerfile-rails gem") } else { - cmd = exec.Command(bundle, "install", "--quiet") - cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - pendingError = cmd.Run() - if pendingError != nil { - pendingError = errors.Wrap(pendingError, "Failed to install dockerfile-rails gem") - } else { - generatorInstalled = true - } + generatorInstalled = true } } } else { @@ -350,6 +328,16 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f generatorInstalled = true } + cmd := exec.Command(bundle, "install", "--quiet") + cmd.Stdin = nil + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + err = cmd.Run() + if err != nil { + return errors.Wrap(pendingError, "Failed to install bundle, exiting") + } + // ensure Gemfile.lock includes the x86_64-linux platform if out, err := exec.Command(bundle, "platform").Output(); err == nil { if !strings.Contains(string(out), "x86_64-linux") { From 858892807b96cb8c8180f37c2cf7a24b40834b3e Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 14 Aug 2024 15:41:12 -0400 Subject: [PATCH 022/104] use launch plan subcommands, install dependencies --- deploy.rb | 46 +++++++++++++++++++++++---- deployer.Dockerfile | 48 ++++++++++++++++++++--------- internal/command/launch/sessions.go | 40 ++++++++++++++++-------- 3 files changed, 101 insertions(+), 33 deletions(-) diff --git a/deploy.rb b/deploy.rb index 3a91b965b3..846b4c7575 100755 --- a/deploy.rb +++ b/deploy.rb @@ -1,4 +1,4 @@ -#!/usr/bin/ruby +#!/usr/bin/env ruby require 'json' require 'time' @@ -14,6 +14,7 @@ module Step GIT_PULL = :git_pull PLAN = :plan CUSTOMIZE = :customize + INSTALL_DEPENDENCIES = :install_dependencies BUILD = :build FLY_POSTGRES_CREATE = :fly_postgres_create SUPABASE_POSTGRES = :supabase_postgres @@ -175,7 +176,6 @@ def get_env(name) steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) -steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO APP_REGION = get_env("DEPLOY_APP_REGION") @@ -205,7 +205,11 @@ def get_env(name) end session = in_step Step::PLAN do - cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json" + manifest = JSON.parse(exec_capture("flyctl launch plan propose").chomp) + + File.write("/tmp/manifest.json", manifest.to_json) + + cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json --from-manifest /tmp/manifest.json" if (region = APP_REGION) cmd += " --region #{region}" @@ -243,6 +247,7 @@ def get_env(name) APP_NAME = manifest["config"]["app"] ORG_SLUG = manifest["plan"]["org"] +final_region = manifest["plan"]["region"] || APP_REGION FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") @@ -250,6 +255,15 @@ def get_env(name) TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") SENTRY = manifest.dig("plan", "sentry") == true +REQUIRES_DEPENDENCIES = %w[ruby bun node elixir] + +RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") 
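+# Runtime language/version are reported by the framework scanners (plan.RuntimeStruct) and may be absent.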
+RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") + +DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) + +steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies"}) if DO_INSTALL_DEPS +steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH @@ -260,9 +274,29 @@ def get_env(name) artifact Artifact::META, { steps: steps } +if DO_INSTALL_DEPS + in_step Step::INSTALL_DEPENDENCIES do + case RUNTIME_LANGUAGE + when "ruby" + exec_capture("rvm install #{RUNTIME_VERSION}") + when "bun" + version = RUNTIME_VERSION || "latest" + exec_capture("asdf install bun #{version}") + when "node" + version = RUNTIME_VERSION || "latest" + exec_capture("asdf install nodejs #{version}") + when "node" + version = RUNTIME_VERSION || "1.16" + exec_capture("asdf install elixir #{version}") + end + end +end + image_tag = SecureRandom.hex(16) image_ref = in_step Step::BUILD do + exec_capture("flyctl launch plan generate -a #{APP_NAME} -o #{ORG_SLUG} --no-deploy /tmp/manifest.json") + if (image_ref = manifest.dig("config","build","image")&.strip) && !image_ref.nil? && !image_ref.empty? info("Skipping build, using image defined in fly config: #{image_ref}") image_ref @@ -278,7 +312,7 @@ def get_env(name) if FLY_PG in_step Step::FLY_POSTGRES_CREATE do pg_name = FLY_PG["app_name"] - region = APP_REGION + region = final_region cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region} --yes" @@ -318,7 +352,7 @@ def get_env(name) in_step Step::UPSTASH_REDIS do db_name = "#{APP_NAME}-redis" - cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION} --yes" + cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{final_region} --yes" if UPSTASH["eviction"] == true cmd += " --enable-eviction" @@ -330,7 +364,7 @@ def get_env(name) cmd += " --replica-regions #{regions.join(",")}" end - artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: APP_REGION, name: db_name } + artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: final_region, name: db_name } exec_capture(cmd) end diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 4bf953a14d..be01e626ec 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -1,26 +1,44 @@ -FROM debian:bookworm +FROM ubuntu:focal -RUN echo "deb http://deb.debian.org/debian testing main contrib non-free non-free-firmware" | tee /etc/apt/sources.list.d/testing.list +# RUN echo "deb http://deb.debian.org/debian testing main contrib non-free non-free-firmware" | tee /etc/apt/sources.list.d/testing.list -RUN apt update && apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev ruby ruby-dev ruby-bundler build-essential libxml2 libpq-dev libyaml-dev +ENV DEBIAN_FRONTEND=noninteractive -# Erlang + Elixir -COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/bin/ /usr/local/bin -COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/elixir/ /usr/local/lib/elixir -COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/erlang/ /usr/local/lib/erlang -# Ensure you have everything 
compiled so fly launch works -RUN mix local.hex --force && mix local.rebar --force -ENV MIX_ENV=dev +RUN apt update && apt install -y --no-install-recommends software-properties-common && \ + apt-add-repository -y ppa:rael-gc/rvm && apt update && apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev rvm build-essential libxml2 libpq-dev libyaml-dev procps gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool pkg-config sqlite3 zlib1g-dev libreadline6-dev software-properties-common -# Node.js -COPY --from=node:22-bookworm /usr/local/bin/ /usr/local/bin -COPY --from=node:22-bookworm /usr/local/lib/node_modules/ /usr/local/lib/node_modules -COPY --from=node:22-bookworm /opt/yarn-v1.22.22/ /opt/yarn-v1.22.22 -ENV NODE_ENV=development +# install a ruby +RUN /bin/bash -lc 'rvm install 3.1.6 && rvm --default use 3.1.6 && gem update --system && gem install bundler' + +# install asdf +RUN git config --global advice.detachedHead false; \ + git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 && \ + /bin/bash -c 'echo -e "\n\n## Configure ASDF \n. $HOME/.asdf/asdf.sh" >> ~/.bashrc' && \ + /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git' && \ + /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add elixir https://github.com/asdf-vm/asdf-elixir.git' && \ + /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add php https://github.com/asdf-community/asdf-php.git' && \ + /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add bun https://github.com/cometkim/asdf-bun.git' && \ + /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add python https://github.com/danhper/asdf-python.git' + +# # Erlang + Elixir +# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/bin/ /usr/local/bin +# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/elixir/ /usr/local/lib/elixir +# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/erlang/ /usr/local/lib/erlang +# # Ensure you have everything compiled so fly launch works +# RUN mix local.hex --force && mix local.rebar --force +# ENV MIX_ENV=dev + +# # Node.js +# COPY --from=node:22-bookworm /usr/local/bin/ /usr/local/bin +# COPY --from=node:22-bookworm /usr/local/lib/node_modules/ /usr/local/lib/node_modules +# COPY --from=node:22-bookworm /opt/yarn-v1.22.22/ /opt/yarn-v1.22.22 +# ENV NODE_ENV=development COPY bin/flyctl /usr/local/bin/flyctl COPY deploy.rb /deploy.rb WORKDIR /usr/src/app +# need a login shell for rvm to work properly... 
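+# (ENTRYPOINT and CMD combine into "/bin/bash -lc /deploy.rb", so the script runs in a login shell)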
+ENTRYPOINT ["/bin/bash", "-lc"] CMD ["/deploy.rb"] \ No newline at end of file diff --git a/internal/command/launch/sessions.go b/internal/command/launch/sessions.go index c99be61436..b74b26279a 100644 --- a/internal/command/launch/sessions.go +++ b/internal/command/launch/sessions.go @@ -8,7 +8,6 @@ import ( "os" "time" - "github.com/logrusorgru/aurora" "github.com/spf13/cobra" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" @@ -69,6 +68,9 @@ func newSessions() *cobra.Command { Description: "Use the configuration file if present without prompting", Default: false, }, + flag.String{ + Name: "from-manifest", + }, ) // not that useful anywhere else yet @@ -103,19 +105,37 @@ func newSessions() *cobra.Command { } func runSessionCreate(ctx context.Context) (err error) { - io := iostreams.FromContext(ctx) + var ( + launchManifest *LaunchManifest + cache *planBuildCache + ) - recoverableErrors := recoverableErrorBuilder{canEnterUi: false} - launchManifest, planBuildCache, err := buildManifest(ctx, nil, &recoverableErrors) + launchManifest, err = getManifestArgument(ctx) if err != nil { return err } - updateConfig(launchManifest.Plan, nil, launchManifest.Config) - if n := flag.GetInt(ctx, "internal-port"); n > 0 { - launchManifest.Config.SetInternalPort(n) + if launchManifest != nil { + // we loaded a manifest... + cache = &planBuildCache{ + appConfig: launchManifest.Config, + sourceInfo: nil, + appNameValidated: true, + warnedNoCcHa: true, + } } + // recoverableErrors := recoverableErrorBuilder{canEnterUi: false} + // launchManifest, planBuildCache, err := buildManifest(ctx, nil, &recoverableErrors) + // if err != nil { + // return err + // } + + // updateConfig(launchManifest.Plan, nil, launchManifest.Config) + // if n := flag.GetInt(ctx, "internal-port"); n > 0 { + // launchManifest.Config.SetInternalPort(n) + // } + manifestPath := flag.GetString(ctx, "manifest-path") file, err := os.Create(manifestPath) @@ -138,14 +158,10 @@ func runSessionCreate(ctx context.Context) (err error) { configPath: "fly.json", LaunchManifest: *launchManifest, env: map[string]string{}, - planBuildCache: *planBuildCache, + planBuildCache: *cache, cache: map[string]interface{}{}, } - if errors := recoverableErrors.build(); errors != "" { - fmt.Fprintf(io.ErrOut, "\n%s\n%s\n", aurora.Reverse(aurora.Red("Problems encountered that can fixed from user interaction:")), errors) - } - session, err := fly.StartCLISession(fmt.Sprintf("%s: %s", state2.Hostname(ctx), state.Plan.AppName), map[string]any{ "target": "launch", "metadata": state.Plan, From 432f560bea83e838128b898e90ec27e5fe347b60 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 19 Aug 2024 16:36:33 -0400 Subject: [PATCH 023/104] wip installing deps for more frameworks --- deploy.rb | 389 ++++++++++++------- deployer.Dockerfile | 88 +++-- internal/command/launch/cmd.go | 10 +- internal/command/launch/launch.go | 12 +- internal/command/launch/launch_frameworks.go | 7 +- internal/command/launch/plan_commands.go | 60 +-- scanner/deno.go | 3 + scanner/django.go | 3 + scanner/elixir.go | 2 + scanner/flask.go | 7 +- scanner/go.go | 5 +- scanner/laravel.go | 4 +- scanner/lucky.go | 2 + scanner/phoenix.go | 1 + scanner/python.go | 7 + scanner/rust.go | 3 + 16 files changed, 383 insertions(+), 220 deletions(-) diff --git a/deploy.rb b/deploy.rb index 846b4c7575..eff06fd3c1 100755 --- a/deploy.rb +++ b/deploy.rb @@ -7,6 +7,8 @@ require 'securerandom' require 'fileutils' +puts ENV["PATH"] + LOG_PREFIX = ENV["LOG_PREFIX"] module Step @@ -15,6 
+17,7 @@ module Step PLAN = :plan CUSTOMIZE = :customize INSTALL_DEPENDENCIES = :install_dependencies + GENERATE_BUILD_REQUIREMENTS = :generate_build_requirements BUILD = :build FLY_POSTGRES_CREATE = :fly_postgres_create SUPABASE_POSTGRES = :supabase_postgres @@ -22,6 +25,14 @@ module Step TIGRIS_OBJECT_STORAGE = :tigris_object_storage SENTRY = :sentry DEPLOY = :deploy + + def self.current + Thread.current[:step] ||= Step::ROOT + end + + def self.set_current(step) + Thread.current[:step] = step + end end module Artifact @@ -39,8 +50,6 @@ module Artifact DOCKER_IMAGE = :docker_image end -$current_step = Step::ROOT - $counter = 0 $counter_mutex = Mutex.new @@ -58,7 +67,7 @@ def elapsed end def nputs(type:, payload: nil) - obj = { id: id(), step: $current_step, type: type, time: elapsed(), payload: payload }.compact + obj = { id: id(), step: Step.current(), type: type, time: elapsed(), payload: payload }.compact puts "#{LOG_PREFIX}#{obj.to_json}" end @@ -88,41 +97,45 @@ def error(msg) end def exec_capture(cmd, display = nil) - event :exec, { cmd: display || cmd } + cmd_display = display || cmd + event :exec, { cmd: cmd_display } - out_mutex = Mutex.new - output = "" + out_mutex = Mutex.new + output = "" - status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| - pid = wait_thr.pid + status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| + pid = wait_thr.pid - stdin.close_write + stdin.close_write - threads = [[stdout, "stdout"], [stderr, "stderr"]].map do |stream, stream_name| - Thread.new do - stream.each_line do |line| - nputs type: stream_name, payload: line.chomp - out_mutex.synchronize { output += line } - end + step = Step.current + + threads = [[stdout, "stdout"], [stderr, "stderr"]].map do |stream, stream_name| + Thread.new do + Step.set_current(step) # in_step would be a problem here, we just need to that the thread with the parent thread's step! + stream.each_line do |line| + nputs type: stream_name, payload: line.chomp + out_mutex.synchronize { output += line } end end + end - threads.each { |thr| thr.join } + threads.each { |thr| thr.join } - wait_thr.value - end + wait_thr.value + end - if !status.success? - event :error, { type: :exec, message: "unsuccessful command '#{cmd}'", exit_code: status.exitstatus, pid: status.pid } - exit 1 - end + if !status.success? + event :error, { type: :exec, message: "unsuccessful command '#{cmd_display}'", exit_code: status.exitstatus, pid: status.pid } + exit 1 + end - output + output end def in_step(step, &block) - old_step = $current_step - $current_step = step + old_step = Step.current() + Step.set_current(step) event :start ret = begin yield block @@ -131,7 +144,7 @@ def in_step(step, &block) exit 1 end event :end - $current_step = old_step + Step.set_current(old_step) ret end @@ -139,10 +152,10 @@ def ts Time.now.utc.iso8601(6) end -def get_env(name) +def get_env(name, default = nil) value = ENV[name]&.strip if value.nil? || value.empty? - return nil + return nil || default end value end @@ -151,6 +164,23 @@ def get_env(name) event :start, { ts: ts() } +DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? +DEPLOY_CUSTOMIZE = !get_env("DEPLOY_CUSTOMIZE").nil? 
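+# Without the interactive customize step these cannot be chosen later, so the app name and org slug must come from the environment (validated below).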
+ +DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") +if !DEPLOY_CUSTOMIZE && !DEPLOY_APP_NAME + event :error, { type: :validation, message: "missing app name" } + exit 1 +end + +DEPLOY_ORG_SLUG = get_env("DEPLOY_ORG_SLUG") +if !DEPLOY_CUSTOMIZE && !DEPLOY_ORG_SLUG + event :error, { type: :validation, message: "missing organization slug" } + exit 1 +end + +DEPLOY_APP_REGION = get_env("DEPLOY_APP_REGION") + GIT_REPO = get_env("GIT_REPO") GIT_REPO_URL = if GIT_REPO @@ -169,19 +199,14 @@ def get_env(name) repo_url end -DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? - steps = [] steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) -steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) - -APP_REGION = get_env("DEPLOY_APP_REGION") +steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) if DEPLOY_CUSTOMIZE if GIT_REPO_URL in_step Step::GIT_PULL do - `git config --global init.defaultBranch main` # NOTE: this is to avoid a large warning message ref = get_env("GIT_REF") artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } @@ -204,42 +229,137 @@ def get_env(name) end end -session = in_step Step::PLAN do - manifest = JSON.parse(exec_capture("flyctl launch plan propose").chomp) +manifest = in_step Step::PLAN do + cmd = "flyctl launch plan propose --force-name" - File.write("/tmp/manifest.json", manifest.to_json) + if (slug = DEPLOY_ORG_SLUG) + cmd += " --org #{slug}" + end - cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json --from-manifest /tmp/manifest.json" - - if (region = APP_REGION) + if (name = DEPLOY_APP_NAME) + cmd += " --name #{name}" + end + + if (region = DEPLOY_APP_REGION) cmd += " --region #{region}" end - cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + # cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + + raw_manifest = exec_capture("#{cmd}").chomp + + begin + manifest = JSON.parse(raw_manifest) + rescue StandardError => e + event :error, { type: :json, message: e, json: raw_manifest } + exit 1 + end - exec_capture(cmd) - session = JSON.parse(File.read("/tmp/session.json")) - manifest = JSON.parse(File.read("/tmp/manifest.json")) + File.write("/tmp/manifest.json", manifest.to_json) artifact Artifact::MANIFEST, manifest - artifact Artifact::SESSION, session - exec_capture("git add -A") - diff = exec_capture("git diff --cached") - artifact Artifact::DIFF, diff + manifest +end + +REQUIRES_DEPENDENCIES = %w[ruby bun node elixir python php] - session +RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") +RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") + +DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) + +steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies"}) if DO_INSTALL_DEPS +steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) + +DEFAULT_ERLANG_VERSION = get_env("DEFAULT_ERLANG_VERSION", "26.2.5.2") + +DEFAULT_RUNTIME_VERSIONS = { + "ruby" => get_env("DEFAULT_RUBY_VERSION", "3.1.6"), + "elixir" => get_env("DEFAULT_ELIXIR_VERSION", "1.16"), + "erlang" => DEFAULT_ERLANG_VERSION, + "node" => get_env("DEFAULT_NODE_VERSION", "20.16.0"), + "bun" => get_env("DEFAULT_BUN_VERSION", "1.1.24"), + "php" => get_env("DEFAULT_PHP_VERSION", "8.1"), + "python" => get_env("DEFAULT_PYTHON_VERSION", "3.12") +} + +ASDF_SUPPORTED_FLYCTL_LANGUAGES = %w[ bun 
node elixir ] +FLYCTL_TO_ASDF_PLUGIN_NAME = { + "node" => "nodejs" +} + +INSTALLABLE_PHP_VERSIONS = %w[ 5.6 7.0 7.1 7.2 7.3 7.4 8.0 8.1 8.2 8.3 8.4 ] + +deps_thread = Thread.new do + if DO_INSTALL_DEPS + in_step Step::INSTALL_DEPENDENCIES do + # get the version + version = DEFAULT_RUNTIME_VERSIONS[RUNTIME_LANGUAGE] + if version.nil? + event :error, { type: :unsupported_version, message: "unhandled runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } + exit 1 + end + + version = RUNTIME_VERSION || version + + if ASDF_SUPPORTED_FLYCTL_LANGUAGES.include?(RUNTIME_LANGUAGE) + plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) + if plugin == "elixir" + # required for elixir to work + exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") + end + exec_capture("asdf install #{plugin} #{version}") + else + case RUNTIME_LANGUAGE + when "ruby" + exec_capture("rvm install #{version}") + when "php" + major, minor = Gem::Version.new(version).segments + php_version = "#{major}.#{minor}" + if !INSTALLABLE_PHP_VERSIONS.include?(php_version) + event :error, { type: :unsupported_version, message: "unsupported PHP version #{version}, supported versions are: #{INSTALLABLE_PHP_VERSIONS.join(", ")}" } + exit 1 + end + exec_capture("apt install --no-install-recommends -y php#{php_version} php#{php_version}-curl php#{php_version}-mbstring php#{php_version}-xml") + exec_capture("curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php") + # TODO: verify signature? + exec_capture("php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer") + else + # we should never get here, but handle it in case! + event :error, { type: :unsupported_version, message: "no handler for runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } + exit 1 + end + end + end + end + + in_step Step::GENERATE_BUILD_REQUIREMENTS do + exec_capture("flyctl launch plan generate /tmp/manifest.json") + exec_capture("git add -A") + diff = exec_capture("git diff --cached") + artifact Artifact::DIFF, diff + end end -manifest = in_step Step::CUSTOMIZE do - cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path /tmp/manifest.json" +if DEPLOY_CUSTOMIZE + manifest = in_step Step::CUSTOMIZE do + cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json --from-manifest /tmp/manifest.json" - exec_capture(cmd) - manifest = JSON.parse(File.read("/tmp/manifest.json")) + exec_capture(cmd) + session = JSON.parse(File.read("/tmp/session.json")) - artifact Artifact::MANIFEST, manifest + artifact Artifact::SESSION, session - manifest + cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path /tmp/manifest.json" + + exec_capture(cmd) + manifest = JSON.parse(File.read("/tmp/manifest.json")) + + artifact Artifact::MANIFEST, manifest + + manifest + end end # Write the fly config file to a tmp directory @@ -247,7 +367,7 @@ def get_env(name) APP_NAME = manifest["config"]["app"] ORG_SLUG = manifest["plan"]["org"] -final_region = manifest["plan"]["region"] || APP_REGION +APP_REGION = manifest["plan"]["region"] FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") @@ -255,14 +375,6 @@ def get_env(name) TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") SENTRY = manifest.dig("plan", "sentry") == true -REQUIRES_DEPENDENCIES = %w[ruby bun node 
elixir] - -RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") -RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") - -DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) - -steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies"}) if DO_INSTALL_DEPS steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE @@ -274,29 +386,12 @@ def get_env(name) artifact Artifact::META, { steps: steps } -if DO_INSTALL_DEPS - in_step Step::INSTALL_DEPENDENCIES do - case RUNTIME_LANGUAGE - when "ruby" - exec_capture("rvm install #{RUNTIME_VERSION}") - when "bun" - version = RUNTIME_VERSION || "latest" - exec_capture("asdf install bun #{version}") - when "node" - version = RUNTIME_VERSION || "latest" - exec_capture("asdf install nodejs #{version}") - when "node" - version = RUNTIME_VERSION || "1.16" - exec_capture("asdf install elixir #{version}") - end - end -end +# Join the parallel task thread +deps_thread.join image_tag = SecureRandom.hex(16) image_ref = in_step Step::BUILD do - exec_capture("flyctl launch plan generate -a #{APP_NAME} -o #{ORG_SLUG} --no-deploy /tmp/manifest.json") - if (image_ref = manifest.dig("config","build","image")&.strip) && !image_ref.nil? && !image_ref.empty? info("Skipping build, using image defined in fly config: #{image_ref}") image_ref @@ -309,96 +404,98 @@ def get_env(name) end end -if FLY_PG - in_step Step::FLY_POSTGRES_CREATE do - pg_name = FLY_PG["app_name"] - region = final_region +if get_env("SKIP_EXTENSIONS").nil? + if FLY_PG + in_step Step::FLY_POSTGRES_CREATE do + pg_name = FLY_PG["app_name"] + region = APP_REGION - cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region} --yes" + cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region} --yes" - if (vm_size = FLY_PG["vm_size"]) - cmd += " --vm-size #{vm_size}" - end + if (vm_size = FLY_PG["vm_size"]) + cmd += " --vm-size #{vm_size}" + end - if (vm_memory = FLY_PG["vm_ram"]) - cmd += " --vm-memory #{vm_memory}" - end + if (vm_memory = FLY_PG["vm_ram"]) + cmd += " --vm-memory #{vm_memory}" + end - if (nodes = FLY_PG["nodes"]) - cmd += " --initial-cluster-size #{nodes}" - end + if (nodes = FLY_PG["nodes"]) + cmd += " --initial-cluster-size #{nodes}" + end - if (disk_size_gb = FLY_PG["disk_size_gb"]) - cmd += " --volume-size #{disk_size_gb}" - end + if (disk_size_gb = FLY_PG["disk_size_gb"]) + cmd += " --volume-size #{disk_size_gb}" + end - artifact Artifact::FLY_POSTGRES, { name: pg_name, region: region, config: FLY_PG } + artifact Artifact::FLY_POSTGRES, { name: pg_name, region: region, config: FLY_PG } - exec_capture(cmd) + exec_capture(cmd) - exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") - end -elsif SUPABASE - in_step Step::SUPABASE_POSTGRES do - cmd = "flyctl ext supabase create --org #{ORG_SLUG} --name #{SUPABASE["db_name"]} --region #{SUPABASE["region"]} --app #{APP_NAME} --yes" + exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") + end + elsif SUPABASE + in_step Step::SUPABASE_POSTGRES do + cmd = "flyctl ext supabase create --org #{ORG_SLUG} --name #{SUPABASE["db_name"]} --region #{SUPABASE["region"]} --app #{APP_NAME} --yes" - artifact Artifact::SUPABASE_POSTGRES, { config: SUPABASE } + artifact Artifact::SUPABASE_POSTGRES, { 
config: SUPABASE } - exec_capture(cmd) + exec_capture(cmd) + end end -end -if UPSTASH - in_step Step::UPSTASH_REDIS do - db_name = "#{APP_NAME}-redis" + if UPSTASH + in_step Step::UPSTASH_REDIS do + db_name = "#{APP_NAME}-redis" - cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{final_region} --yes" + cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION} --yes" - if UPSTASH["eviction"] == true - cmd += " --enable-eviction" - elsif UPSTASH["eviction"] == false - cmd += " --disable-eviction" - end + if UPSTASH["eviction"] == true + cmd += " --enable-eviction" + elsif UPSTASH["eviction"] == false + cmd += " --disable-eviction" + end - if (regions = UPSTASH["regions"]) - cmd += " --replica-regions #{regions.join(",")}" - end + if (regions = UPSTASH["regions"]) + cmd += " --replica-regions #{regions.join(",")}" + end - artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: final_region, name: db_name } + artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: APP_REGION, name: db_name } - exec_capture(cmd) + exec_capture(cmd) + end end -end -if TIGRIS - in_step Step::TIGRIS_OBJECT_STORAGE do - cmd = "flyctl ext tigris create --org #{ORG_SLUG} --app #{APP_NAME} --yes" + if TIGRIS + in_step Step::TIGRIS_OBJECT_STORAGE do + cmd = "flyctl ext tigris create --org #{ORG_SLUG} --app #{APP_NAME} --yes" - if (name = TIGRIS["name"]) && !name.empty? - cmd += " --name #{name}" - end + if (name = TIGRIS["name"]) && !name.empty? + cmd += " --name #{name}" + end - if (pub = TIGRIS["public"]) && pub == true - cmd += " --public" - end + if (pub = TIGRIS["public"]) && pub == true + cmd += " --public" + end - if (accel = TIGRIS["accelerate"]) && accel == true - cmd += " --accelerate" - end + if (accel = TIGRIS["accelerate"]) && accel == true + cmd += " --accelerate" + end - if (domain = TIGRIS["website_domain_name"]) && !domain.empty? - cmd += " --website-domain-name #{domain}" - end + if (domain = TIGRIS["website_domain_name"]) && !domain.empty? 
+ cmd += " --website-domain-name #{domain}" + end - artifact Artifact::TIGRIS_OBJECT_STORAGE, { config: TIGRIS } + artifact Artifact::TIGRIS_OBJECT_STORAGE, { config: TIGRIS } - exec_capture(cmd) + exec_capture(cmd) + end end -end -if SENTRY - in_step Step::SENTRY do - exec_capture("flyctl ext sentry create --app #{APP_NAME} --yes") + if SENTRY + in_step Step::SENTRY do + exec_capture("flyctl ext sentry create --app #{APP_NAME} --yes") + end end end diff --git a/deployer.Dockerfile b/deployer.Dockerfile index be01e626ec..fa94d20319 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -1,38 +1,62 @@ -FROM ubuntu:focal - -# RUN echo "deb http://deb.debian.org/debian testing main contrib non-free non-free-firmware" | tee /etc/apt/sources.list.d/testing.list +FROM ubuntu:20.04 ENV DEBIAN_FRONTEND=noninteractive -RUN apt update && apt install -y --no-install-recommends software-properties-common && \ - apt-add-repository -y ppa:rael-gc/rvm && apt update && apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev rvm build-essential libxml2 libpq-dev libyaml-dev procps gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool pkg-config sqlite3 zlib1g-dev libreadline6-dev software-properties-common - -# install a ruby -RUN /bin/bash -lc 'rvm install 3.1.6 && rvm --default use 3.1.6 && gem update --system && gem install bundler' - -# install asdf -RUN git config --global advice.detachedHead false; \ - git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 && \ - /bin/bash -c 'echo -e "\n\n## Configure ASDF \n. $HOME/.asdf/asdf.sh" >> ~/.bashrc' && \ - /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git' && \ - /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add elixir https://github.com/asdf-vm/asdf-elixir.git' && \ - /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add php https://github.com/asdf-community/asdf-php.git' && \ - /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add bun https://github.com/cometkim/asdf-bun.git' && \ - /bin/bash -c 'source ~/.asdf/asdf.sh; asdf plugin add python https://github.com/danhper/asdf-python.git' - -# # Erlang + Elixir -# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/bin/ /usr/local/bin -# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/elixir/ /usr/local/lib/elixir -# COPY --from=hexpm/elixir:1.17.2-erlang-27.0.1-debian-bookworm-20240722-slim /usr/local/lib/erlang/ /usr/local/lib/erlang -# # Ensure you have everything compiled so fly launch works -# RUN mix local.hex --force && mix local.rebar --force -# ENV MIX_ENV=dev - -# # Node.js -# COPY --from=node:22-bookworm /usr/local/bin/ /usr/local/bin -# COPY --from=node:22-bookworm /usr/local/lib/node_modules/ /usr/local/lib/node_modules -# COPY --from=node:22-bookworm /opt/yarn-v1.22.22/ /opt/yarn-v1.22.22 -# ENV NODE_ENV=development +RUN apt update && \ + apt install -y --no-install-recommends software-properties-common && \ + apt-add-repository -y ppa:rael-gc/rvm && apt-add-repository -y ppa:ondrej/php && apt update && \ + apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev rvm build-essential libxml2 libpq-dev libyaml-dev procps gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool pkg-config sqlite3 zlib1g-dev libreadline6-dev locales mlocate + +SHELL 
["/bin/bash", "-lc"] + +RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ + dpkg-reconfigure --frontend=noninteractive locales && \ + update-locale LANG=en_US.UTF-8 + +ENV LANG en_US.UTF-8 + +# configure git a bit +RUN git config --global advice.detachedHead false && \ + git config --global init.defaultBranch main + +ENV DEFAULT_RUBY_VERSION=3.1.6 \ + DEFAULT_NODE_VERSION=20.16.0 \ + DEFAULT_ERLANG_VERSION=26.2.5.2 \ + DEFAULT_ELIXIR_VERSION=1.16 \ + DEFAULT_BUN_VERSION=1.1.24 \ + DEFAULT_PHP_VERSION=8.1.0 \ + DEFAULT_PYTHON_VERSION=3.12 + +ARG NODE_BUILD_VERSION=5.3.8 + +# install a ruby to run the initial script +RUN /bin/bash -lc 'rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAULT_RUBY_VERSION && gem update --system && gem install bundler' + +# install mise +RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ + echo -e "\n\nexport PATH=\"$HOME/.local/share/mise/shims:$PATH\"" >> ~/.profile + +ENV MISE_PYTHON_COMPILE=false + +# install asdf, its plugins and dependencies +RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 && \ + echo -e "\n\n## Configure ASDF \n. $HOME/.asdf/asdf.sh" >> ~/.profile && \ + source $HOME/.asdf/asdf.sh && \ + # nodejs + curl -L https://github.com/nodenv/node-build/archive/refs/tags/v$NODE_BUILD_VERSION.tar.gz -o node-build.tar.gz && \ + tar -xzf node-build.tar.gz && \ + env PREFIX=/usr/local ./node-build-$NODE_BUILD_VERSION/install.sh && \ + asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git && \ + # elixir + asdf plugin-add erlang https://github.com/michallepicki/asdf-erlang-prebuilt-ubuntu-20.04.git && \ + echo -e "local.hex\nlocal.rebar" > $HOME/.default-mix-commands && \ + asdf plugin add elixir https://github.com/asdf-vm/asdf-elixir.git && \ + asdf install erlang $DEFAULT_ERLANG_VERSION && asdf global erlang $DEFAULT_ERLANG_VERSION && \ + asdf install elixir $DEFAULT_ELIXIR_VERSION && asdf global elixir $DEFAULT_ELIXIR_VERSION && \ + # bun + asdf plugin add bun https://github.com/cometkim/asdf-bun.git + +ENV MIX_ENV=dev COPY bin/flyctl /usr/local/bin/flyctl COPY deploy.rb /deploy.rb diff --git a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index 6cf2359c25..f32f275dcc 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -283,7 +283,9 @@ func run(ctx context.Context) (err error) { return err } - if launchManifest != nil { + planStep := plan.GetPlanStep(ctx) + + if launchManifest != nil && planStep != "generate" { // we loaded a manifest... 
cache = &planBuildCache{ appConfig: launchManifest.Config, @@ -310,7 +312,10 @@ func run(ctx context.Context) (err error) { launchManifest, cache, err = buildManifest(ctx, parentConfig, &recoverableErrors) if err != nil { var recoverableErr recoverableInUiError - if errors.As(err, &recoverableErr) && canEnterUi { + if errors.As(err, &recoverableErr) { + if !canEnterUi { + return err + } } else { return err } @@ -359,7 +364,6 @@ func run(ctx context.Context) (err error) { family = state.sourceInfo.Family } - planStep := plan.GetPlanStep(ctx) if planStep == "" { fmt.Fprintf( io.Out, diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index db0c4fd9aa..21827b619c 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -95,11 +95,6 @@ func (state *launchState) Launch(ctx context.Context) error { } } - // Override internal port if requested using --internal-port flag - if n := flag.GetInt(ctx, "internal-port"); n > 0 { - state.appConfig.SetInternalPort(n) - } - // Sentry if !flag.GetBool(ctx, "no-create") { if err = state.launchSentry(ctx, state.Plan.AppName); err != nil { @@ -107,6 +102,13 @@ func (state *launchState) Launch(ctx context.Context) error { } } + if planStep != "generate" { + // Override internal port if requested using --internal-port flag + if n := flag.GetInt(ctx, "internal-port"); n > 0 { + state.appConfig.SetInternalPort(n) + } + } + // Finally write application configuration to fly.toml configDir, configFile := filepath.Split(state.configPath) configFileOverride := flag.GetString(ctx, flagnames.AppConfigFilePath) diff --git a/internal/command/launch/launch_frameworks.go b/internal/command/launch/launch_frameworks.go index 8aa5b0647d..60bae2ef91 100644 --- a/internal/command/launch/launch_frameworks.go +++ b/internal/command/launch/launch_frameworks.go @@ -15,6 +15,7 @@ import ( "github.com/superfly/flyctl/gql" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/prompt" @@ -29,8 +30,10 @@ func (state *launchState) setupGitHubActions(ctx context.Context, appName string gh, err := exec.LookPath("gh") if err != nil { - fmt.Println("Run `fly tokens create deploy -x 999999h` to create a token and set it as the FLY_API_TOKEN secret in your GitHub repository settings") - fmt.Println("See https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions") + if plan.GetPlanStep(ctx) == "" { + fmt.Println("Run `fly tokens create deploy -x 999999h` to create a token and set it as the FLY_API_TOKEN secret in your GitHub repository settings") + fmt.Println("See https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions") + } } else { apiClient := flyutil.ClientFromContext(ctx) diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index 3b035037dd..b8e9d1f71a 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -33,6 +33,8 @@ func newPropose() *cobra.Command { cmd := command.New("propose", desc, desc, runPropose) flag.Add(cmd, + flag.Region(), + flag.Org(), flag.String{ Name: "from", Description: "A github repo URL to use as a template for the new app", @@ -49,6 +51,19 @@ func newPropose() *cobra.Command { Default: true, Hidden: true, }, + flag.String{ + Name: "name", + 
Description: `Name of the new app`, + }, + flag.Bool{ + Name: "force-name", + Hidden: true, + }, + flag.Bool{ + Name: "copy-config", + Description: "Use the configuration file if present without prompting", + Default: false, + }, ) return cmd @@ -61,8 +76,7 @@ func newCreate() *cobra.Command { flag.Add(cmd, flag.String{ - Name: "manifest-path", - Shorthand: "p", + Name: "from-manifest", Description: "Path to read the manifest from", Default: "", Hidden: true, @@ -79,8 +93,7 @@ func newPostgres() *cobra.Command { flag.Add(cmd, flag.String{ - Name: "manifest-path", - Shorthand: "p", + Name: "from-manifest", Description: "Path to read the manifest from", Default: "", Hidden: true, @@ -97,8 +110,7 @@ func newRedis() *cobra.Command { flag.Add(cmd, flag.String{ - Name: "manifest-path", - Shorthand: "p", + Name: "from-manifest", Description: "Path to read the manifest from", Default: "", Hidden: true, @@ -115,8 +127,7 @@ func newTigris() *cobra.Command { flag.Add(cmd, flag.String{ - Name: "manifest-path", - Shorthand: "p", + Name: "from-manifest", Description: "Path to read the manifest from", Default: "", Hidden: true, @@ -132,25 +143,18 @@ func newGenerate() *cobra.Command { cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, - flag.App(), - flag.Region(), - flag.Org(), - flag.AppConfig(), + // flag.App(), + // flag.Region(), + // flag.Org(), + // flag.AppConfig(), flag.Bool{ Name: "no-deploy", Description: "Don't deploy the app", Default: true, Hidden: true, }, - flag.Int{ - Name: "internal-port", - Description: "Set internal_port for all services in the generated fly.toml", - Default: -1, - Hidden: true, - }, flag.String{ - Name: "manifest-path", - Shorthand: "p", + Name: "from-manifest", Description: "Path to read the manifest from", Default: "", Hidden: true, @@ -166,36 +170,34 @@ func RunPlan(ctx context.Context, step string) error { } func runPropose(ctx context.Context) error { - RunPlan(ctx, "propose") - return nil + return RunPlan(ctx, "propose") } func runCreate(ctx context.Context) error { - flag.SetString(ctx, "manifest-path", flag.FirstArg(ctx)) + flag.SetString(ctx, "from-manifest", flag.FirstArg(ctx)) RunPlan(ctx, "create") return nil } func runPostgres(ctx context.Context) error { - flag.SetString(ctx, "manifest-path", flag.FirstArg(ctx)) + flag.SetString(ctx, "from-manifest", flag.FirstArg(ctx)) RunPlan(ctx, "postgres") return nil } func runRedis(ctx context.Context) error { - flag.SetString(ctx, "manifest-path", flag.FirstArg(ctx)) + flag.SetString(ctx, "from-manifest", flag.FirstArg(ctx)) RunPlan(ctx, "redis") return nil } func runTigris(ctx context.Context) error { - flag.SetString(ctx, "manifest-path", flag.FirstArg(ctx)) + flag.SetString(ctx, "from-manifest", flag.FirstArg(ctx)) RunPlan(ctx, "tigris") return nil } func runGenerate(ctx context.Context) error { - flag.SetString(ctx, "manifest-path", flag.FirstArg(ctx)) - RunPlan(ctx, "generate") - return nil + flag.SetString(ctx, "from-manifest", flag.FirstArg(ctx)) + return RunPlan(ctx, "generate") } diff --git a/scanner/deno.go b/scanner/deno.go index 90a54f3302..441ee982cb 100644 --- a/scanner/deno.go +++ b/scanner/deno.go @@ -1,5 +1,7 @@ package scanner +import "github.com/superfly/flyctl/internal/command/launch/plan" + func configureDeno(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { if !checksPass( sourceDir, @@ -21,6 +23,7 @@ func configureDeno(sourceDir string, config *ScannerConfig) (*SourceInfo, error) Env: map[string]string{ "PORT": "8080", }, + Runtime: plan.RuntimeStruct{Language: "deno"}, } return s, 
nil diff --git a/scanner/django.go b/scanner/django.go index 25cc9f8f2e..2e35e54f8b 100644 --- a/scanner/django.go +++ b/scanner/django.go @@ -11,6 +11,7 @@ import ( "github.com/logrusorgru/aurora" "github.com/mattn/go-zglob" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/command/launch/plan" ) // setup django with a postgres database @@ -275,6 +276,8 @@ For detailed documentation, see https://fly.dev/docs/django/ s.Files = templatesExecute("templates/django", vars) + s.Runtime = plan.RuntimeStruct{Language: "python", Version: pythonVersion} + return s, nil } diff --git a/scanner/elixir.go b/scanner/elixir.go index 711c72ca70..cdedf3dcc3 100644 --- a/scanner/elixir.go +++ b/scanner/elixir.go @@ -4,6 +4,7 @@ import ( "path/filepath" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/command/launch/plan" ) func configureElixir(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { @@ -19,6 +20,7 @@ func configureElixir(sourceDir string, config *ScannerConfig) (*SourceInfo, erro Env: map[string]string{ "PORT": "8080", }, + Runtime: plan.RuntimeStruct{Language: "elixir"}, } return s, nil diff --git a/scanner/flask.go b/scanner/flask.go index 97f44b5c07..0e8aebac4f 100644 --- a/scanner/flask.go +++ b/scanner/flask.go @@ -1,6 +1,10 @@ package scanner -import "fmt" +import ( + "fmt" + + "github.com/superfly/flyctl/internal/command/launch/plan" +) func configureFlask(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { // require "Flask" to be in requirements.txt @@ -32,6 +36,7 @@ func configureFlask(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { Port: 8080, SkipDeploy: true, DeployDocs: `We have generated a simple Dockerfile for you. Modify it to fit your needs and run "fly deploy" to deploy your application.`, + Runtime: plan.RuntimeStruct{Language: "python"}, } return s, nil diff --git a/scanner/go.go b/scanner/go.go index 93be10ea76..51073644d5 100644 --- a/scanner/go.go +++ b/scanner/go.go @@ -2,9 +2,11 @@ package scanner import ( "fmt" + "os" + + "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/terminal" "golang.org/x/mod/modfile" - "os" ) func configureGo(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { @@ -19,6 +21,7 @@ func configureGo(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { Env: map[string]string{ "PORT": "8080", }, + Runtime: plan.RuntimeStruct{Language: "go"}, } if !absFileExists("go.sum") { diff --git a/scanner/laravel.go b/scanner/laravel.go index c3c345d28e..28bf7374bb 100644 --- a/scanner/laravel.go +++ b/scanner/laravel.go @@ -64,9 +64,11 @@ func configureLaravel(sourceDir string, config *ScannerConfig) (*SourceInfo, err if err != nil || phpVersion == "" { // Fallback to 8.0, which has // the broadest compatibility - phpVersion = "8.0" + phpVersion = "8.1" } + s.Runtime = plan.RuntimeStruct{Language: "php", Version: phpVersion} + s.BuildArgs = map[string]string{ "PHP_VERSION": phpVersion, "NODE_VERSION": "18", diff --git a/scanner/lucky.go b/scanner/lucky.go index 77228d6dcd..e1dad34ab4 100644 --- a/scanner/lucky.go +++ b/scanner/lucky.go @@ -2,6 +2,7 @@ package scanner import ( "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/command/launch/plan" ) func configureLucky(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { @@ -39,6 +40,7 @@ func configureLucky(sourceDir string, config *ScannerConfig) (*SourceInfo, error UrlPrefix: "/", }, }, + Runtime: plan.RuntimeStruct{Language: 
"crystal"}, } return s, nil diff --git a/scanner/phoenix.go b/scanner/phoenix.go index 90ba24796f..c5d9317c20 100644 --- a/scanner/phoenix.go +++ b/scanner/phoenix.go @@ -33,6 +33,7 @@ func configurePhoenix(sourceDir string, config *ScannerConfig) (*SourceInfo, err }, }, }, + Runtime: plan.RuntimeStruct{Language: "elixir"}, } // Detect if --copy-config and --now flags are set. If so, limited set of diff --git a/scanner/python.go b/scanner/python.go index 77870ef9a0..b832d86d8d 100644 --- a/scanner/python.go +++ b/scanner/python.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" "github.com/pelletier/go-toml/v2" + "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/terminal" ) @@ -138,6 +139,9 @@ func intoSource(cfg PyCfg) (*SourceInfo, error) { return nil, nil } } + + runtime := plan.RuntimeStruct{Language: "python"} + vars[string(cfg.depStyle)] = true objectStorage := slices.Contains(cfg.deps, "boto3") || slices.Contains(cfg.deps, "boto") if app == "" { @@ -150,6 +154,7 @@ func intoSource(cfg PyCfg) (*SourceInfo, error) { Family: "FastAPI", Port: 8000, ObjectStorageDesired: objectStorage, + Runtime: runtime, }, nil } else if app == Flask { vars["flask"] = true @@ -158,6 +163,7 @@ func intoSource(cfg PyCfg) (*SourceInfo, error) { Family: "Flask", Port: 8080, ObjectStorageDesired: objectStorage, + Runtime: runtime, }, nil } else if app == Streamlit { vars["streamlit"] = true @@ -172,6 +178,7 @@ func intoSource(cfg PyCfg) (*SourceInfo, error) { Family: "Streamlit", Port: 8501, ObjectStorageDesired: objectStorage, + Runtime: runtime, }, nil } else { return nil, nil diff --git a/scanner/rust.go b/scanner/rust.go index fdf34d7be1..85babb1b0c 100644 --- a/scanner/rust.go +++ b/scanner/rust.go @@ -1,5 +1,7 @@ package scanner +import "github.com/superfly/flyctl/internal/command/launch/plan" + func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { if !checksPass(sourceDir, fileExists("Cargo.toml", "Cargo.lock")) { return nil, nil @@ -39,6 +41,7 @@ func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { Port: 8080, Env: env, SkipDatabase: true, + Runtime: plan.RuntimeStruct{Language: "rust"}, } return s, nil } From 306980e3f0654fda8e895310b0431f9fe791ccc7 Mon Sep 17 00:00:00 2001 From: Kathryn Anne S Tan Date: Tue, 20 Aug 2024 01:16:18 +0300 Subject: [PATCH 024/104] Scan composer file for php version, fallback to artisan version if cannot find --- scanner/laravel.go | 47 +++++++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/scanner/laravel.go b/scanner/laravel.go index 6aaf923d04..31899eb95f 100644 --- a/scanner/laravel.go +++ b/scanner/laravel.go @@ -207,21 +207,42 @@ Now: run 'fly deploy' to deploy your %s app. 
} func extractPhpVersion() (string, error) { - /* Example Output: - PHP 8.1.8 (cli) (built: Jul 8 2022 10:58:31) (NTS) - Copyright (c) The PHP Group - Zend Engine v4.1.8, Copyright (c) Zend Technologies - with Zend OPcache v8.1.8, Copyright (c), by Zend Technologies - */ - cmd := exec.Command("php", "-v") - out, err := cmd.CombinedOutput() - if err != nil { - return "", err + /* VIA composer.json file */ + // Capture major/minor version (leaving out revision version) + re := regexp.MustCompile(`([0-9]+\.[0-9]+)`) + var match = re.FindStringSubmatch("") + + data, err := os.ReadFile("composer.json") + if err == nil { + var composerJson map[string]interface{} + err = json.Unmarshal(data, &composerJson) + if err == nil { + // check for the package in the composer.json + require, ok := composerJson["require"].(map[string]interface{}) + if ok && require["php"] != nil { + str := fmt.Sprint(require["php"] ) + match = re.FindStringSubmatch(str) + } + } } - // Capture major/minor version (leaving out revision version) - re := regexp.MustCompile(`PHP ([0-9]+\.[0-9]+)\.[0-9]`) - match := re.FindStringSubmatch(string(out)) + if len(match)==0{ + /* VIA php artisan version: + PHP 8.1.8 (cli) (built: Jul 8 2022 10:58:31) (NTS) + Copyright (c) The PHP Group + Zend Engine v4.1.8, Copyright (c) Zend Technologies + with Zend OPcache v8.1.8, Copyright (c), by Zend Technologies + */ + cmd := exec.Command("php", "-v") + out, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + + // Capture major/minor version (leaving out revision version) + re := regexp.MustCompile(`PHP ([0-9]+\.[0-9]+)\.[0-9]`) + match = re.FindStringSubmatch(string(out)) + } if len(match) > 1 { // If the PHP version is below 7.4, we won't have a From bbf03538f2ac25fbb346ab62a2700790dc962118 Mon Sep 17 00:00:00 2001 From: Kathryn Anne S Tan Date: Tue, 20 Aug 2024 01:23:38 +0300 Subject: [PATCH 025/104] fix trailing spaces, fix gofmt error --- scanner/laravel.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scanner/laravel.go b/scanner/laravel.go index 31899eb95f..ff41d51711 100644 --- a/scanner/laravel.go +++ b/scanner/laravel.go @@ -220,14 +220,14 @@ func extractPhpVersion() (string, error) { // check for the package in the composer.json require, ok := composerJson["require"].(map[string]interface{}) if ok && require["php"] != nil { - str := fmt.Sprint(require["php"] ) + str := fmt.Sprint(require["php"]) match = re.FindStringSubmatch(str) } } } - if len(match)==0{ - /* VIA php artisan version: + if len(match) == 0 { + /* VIA php artisan version: PHP 8.1.8 (cli) (built: Jul 8 2022 10:58:31) (NTS) Copyright (c) The PHP Group Zend Engine v4.1.8, Copyright (c) Zend Technologies From 63261c530cc4b53125d319fc4e052f1315d5650d Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 20 Aug 2024 10:38:14 -0400 Subject: [PATCH 026/104] write manifest to fix, switch customization opt-out instead of opt-in, handle python projects --- deploy.rb | 26 ++++++++++++++---------- deployer.Dockerfile | 2 +- internal/command/launch/cmd.go | 13 +++++++++++- internal/command/launch/plan_commands.go | 6 ++++++ 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/deploy.rb b/deploy.rb index eff06fd3c1..095125a80e 100755 --- a/deploy.rb +++ b/deploy.rb @@ -7,8 +7,6 @@ require 'securerandom' require 'fileutils' -puts ENV["PATH"] - LOG_PREFIX = ENV["LOG_PREFIX"] module Step @@ -165,7 +163,7 @@ def get_env(name, default = nil) event :start, { ts: ts() } DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? 
-DEPLOY_CUSTOMIZE = !get_env("DEPLOY_CUSTOMIZE").nil? +DEPLOY_CUSTOMIZE = !get_env("NO_DEPLOY_CUSTOMIZE") DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") if !DEPLOY_CUSTOMIZE && !DEPLOY_APP_NAME @@ -229,8 +227,10 @@ def get_env(name, default = nil) end end +MANIFEST_PATH = "/tmp/manifest.json" + manifest = in_step Step::PLAN do - cmd = "flyctl launch plan propose --force-name" + cmd = "flyctl launch plan propose --force-name --manifest-path #{MANIFEST_PATH}" if (slug = DEPLOY_ORG_SLUG) cmd += " --org #{slug}" @@ -246,7 +246,9 @@ def get_env(name, default = nil) # cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") - raw_manifest = exec_capture("#{cmd}").chomp + exec_capture("#{cmd}").chomp + + raw_manifest = File.read(MANIFEST_PATH) begin manifest = JSON.parse(raw_manifest) @@ -255,8 +257,6 @@ def get_env(name, default = nil) exit 1 end - File.write("/tmp/manifest.json", manifest.to_json) - artifact Artifact::MANIFEST, manifest manifest @@ -301,7 +301,7 @@ def get_env(name, default = nil) exit 1 end - version = RUNTIME_VERSION || version + version = RUNTIME_VERSION.empty? ? version : RUNTIME_VERSION if ASDF_SUPPORTED_FLYCTL_LANGUAGES.include?(RUNTIME_LANGUAGE) plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) @@ -325,6 +325,10 @@ def get_env(name, default = nil) exec_capture("curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php") # TODO: verify signature? exec_capture("php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer") + when "python" + major, minor = Gem::Version.new(version).segments + python_version = "#{major}.#{minor}" + exec_capture("mise use -g python@#{python_version}") else # we should never get here, but handle it in case! event :error, { type: :unsupported_version, message: "no handler for runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } @@ -335,7 +339,7 @@ def get_env(name, default = nil) end in_step Step::GENERATE_BUILD_REQUIREMENTS do - exec_capture("flyctl launch plan generate /tmp/manifest.json") + exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") exec_capture("git add -A") diff = exec_capture("git diff --cached") artifact Artifact::DIFF, diff @@ -344,14 +348,14 @@ def get_env(name, default = nil) if DEPLOY_CUSTOMIZE manifest = in_step Step::CUSTOMIZE do - cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path /tmp/manifest.json --from-manifest /tmp/manifest.json" + cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH} --from-manifest #{MANIFEST_PATH}" exec_capture(cmd) session = JSON.parse(File.read("/tmp/session.json")) artifact Artifact::SESSION, session - cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path /tmp/manifest.json" + cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH}" exec_capture(cmd) manifest = JSON.parse(File.read("/tmp/manifest.json")) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index fa94d20319..2b98e61179 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -34,7 +34,7 @@ RUN /bin/bash -lc 'rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAU # install mise RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ - echo -e "\n\nexport PATH=\"$HOME/.local/share/mise/shims:$PATH\"" >> ~/.profile + echo -e "\n\nexport PATH=\"$HOME/.local/bin:$HOME/.local/share/mise/shims:$PATH\"" >> ~/.profile ENV MISE_PYTHON_COMPILE=false diff --git 
a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index f32f275dcc..5ca940842b 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -322,7 +322,18 @@ func run(ctx context.Context) (err error) { } if flag.GetBool(ctx, "manifest") { - jsonEncoder := json.NewEncoder(io.Out) + var jsonEncoder *json.Encoder + if manifestPath := flag.GetString(ctx, "manifest-path"); manifestPath != "" { + file, err := os.OpenFile(manifestPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err != nil { + return err + } + defer file.Close() + + jsonEncoder = json.NewEncoder(file) + } else { + jsonEncoder = json.NewEncoder(io.Out) + } jsonEncoder.SetIndent("", " ") return jsonEncoder.Encode(launchManifest) } diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index b8e9d1f71a..a241e69b9c 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -64,6 +64,12 @@ func newPropose() *cobra.Command { Description: "Use the configuration file if present without prompting", Default: false, }, + flag.String{ + Name: "manifest-path", + Description: "Path to write the manifest to", + Default: "", + Hidden: true, + }, ) return cmd From a62607a863fde35aac50c88979ca63453cd88e1b Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 28 Aug 2024 15:03:05 -0400 Subject: [PATCH 027/104] don't log everything, re-order steps, always do JSON for artifacts --- deploy.rb | 47 ++++++++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/deploy.rb b/deploy.rb index 095125a80e..1e1873f618 100755 --- a/deploy.rb +++ b/deploy.rb @@ -94,7 +94,7 @@ def error(msg) log("error", msg) end -def exec_capture(cmd, display = nil) +def exec_capture(cmd, display: nil, log: true) cmd_display = display || cmd event :exec, { cmd: cmd_display } @@ -112,7 +112,9 @@ def exec_capture(cmd, display = nil) Thread.new do Step.set_current(step) # in_step would be a problem here, we just need to that the thread with the parent thread's step! 
stream.each_line do |line| - nputs type: stream_name, payload: line.chomp + if log + nputs type: stream_name, payload: line.chomp + end out_mutex.synchronize { output += line } end end @@ -208,20 +210,20 @@ def get_env(name, default = nil) ref = get_env("GIT_REF") artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } - exec_capture("git init") + exec_capture("git init", log: false) redacted_repo_url = GIT_REPO_URL.dup redacted_repo_url.user = nil redacted_repo_url.password = nil - exec_capture("git remote add origin #{GIT_REPO_URL.to_s}", "git remote add origin #{redacted_repo_url.to_s}") + exec_capture("git remote add origin #{GIT_REPO_URL.to_s}", display: "git remote add origin #{redacted_repo_url.to_s}") - ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'").chomp if !ref + ref = exec_capture("git remote show origin | sed -n '/HEAD branch/s/.*: //p'", log: false).chomp if !ref exec_capture("git -c protocol.version=2 fetch origin #{ref}") exec_capture("git reset --hard --recurse-submodules FETCH_HEAD") - head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'")) + head = JSON.parse(exec_capture("git log -1 --pretty=format:'{\"commit\": \"%H\", \"author\": \"%an\", \"author_email\": \"%ae\", \"date\": \"%ad\", \"message\": \"%f\"}'", log: false)) artifact Artifact::GIT_HEAD, head end @@ -230,23 +232,23 @@ def get_env(name, default = nil) MANIFEST_PATH = "/tmp/manifest.json" manifest = in_step Step::PLAN do - cmd = "flyctl launch plan propose --force-name --manifest-path #{MANIFEST_PATH}" + cmd = "flyctl launch plan propose --manifest-path #{MANIFEST_PATH}" if (slug = DEPLOY_ORG_SLUG) cmd += " --org #{slug}" end if (name = DEPLOY_APP_NAME) - cmd += " --name #{name}" + cmd += " --force-name --name #{name}" end if (region = DEPLOY_APP_REGION) cmd += " --region #{region}" end - # cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") - exec_capture("#{cmd}").chomp + exec_capture(cmd).chomp raw_manifest = File.read(MANIFEST_PATH) @@ -268,9 +270,9 @@ def get_env(name, default = nil) RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) +DO_GEN_REQS = !RUNTIME_LANGUAGE.empty? 
-steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies"}) if DO_INSTALL_DEPS -steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) +steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies", async: true}) if DO_INSTALL_DEPS DEFAULT_ERLANG_VERSION = get_env("DEFAULT_ERLANG_VERSION", "26.2.5.2") @@ -337,13 +339,6 @@ def get_env(name, default = nil) end end end - - in_step Step::GENERATE_BUILD_REQUIREMENTS do - exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") - exec_capture("git add -A") - diff = exec_capture("git diff --cached") - artifact Artifact::DIFF, diff - end end if DEPLOY_CUSTOMIZE @@ -379,6 +374,7 @@ def get_env(name, default = nil) TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") SENTRY = manifest.dig("plan", "sentry") == true +steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE @@ -393,6 +389,15 @@ def get_env(name, default = nil) # Join the parallel task thread deps_thread.join +if DO_GEN_REQS + in_step Step::GENERATE_BUILD_REQUIREMENTS do + exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") + exec_capture("git add -A", log: false) + diff = exec_capture("git diff --cached", log: false) + artifact Artifact::DIFF, { output: diff } + end +end + image_tag = SecureRandom.hex(16) image_ref = in_step Step::BUILD do @@ -402,8 +407,8 @@ def get_env(name, default = nil) else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --build-only --push --image-label #{image_tag}") - artifact Artifact::DOCKER_IMAGE, image_ref + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} -c /tmp/fly.json --image-label #{image_tag}") + artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end end From 6fcadd1b27c7d55ba569ce0cf955ed8f26100e8f Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 11 Sep 2024 10:02:11 -0400 Subject: [PATCH 028/104] add DEPLOY_ONLY env to prevent all the planning of a launch --- deploy.rb | 441 +++++++++++-------------------- deploy/common.rb | 159 +++++++++++ deployer.Dockerfile | 3 +- deployer.Dockerfile.dockerignore | 3 +- 4 files changed, 313 insertions(+), 293 deletions(-) create mode 100644 deploy/common.rb diff --git a/deploy.rb b/deploy.rb index 1e1873f618..ef9342de16 100755 --- a/deploy.rb +++ b/deploy.rb @@ -1,171 +1,15 @@ #!/usr/bin/env ruby -require 'json' -require 'time' -require 'open3' -require 'uri' -require 'securerandom' -require 'fileutils' - -LOG_PREFIX = ENV["LOG_PREFIX"] - -module Step - ROOT = :__root__ - GIT_PULL = :git_pull - PLAN = :plan - CUSTOMIZE = :customize - INSTALL_DEPENDENCIES = :install_dependencies - GENERATE_BUILD_REQUIREMENTS = :generate_build_requirements - BUILD = :build - FLY_POSTGRES_CREATE = :fly_postgres_create - SUPABASE_POSTGRES = :supabase_postgres - UPSTASH_REDIS = :upstash_redis - TIGRIS_OBJECT_STORAGE = :tigris_object_storage - SENTRY = :sentry - DEPLOY = :deploy - - def self.current - Thread.current[:step] ||= Step::ROOT - end - - def self.set_current(step) - Thread.current[:step] = step - end -end - -module Artifact - META = :meta - 
GIT_INFO = :git_info - GIT_HEAD = :git_head - MANIFEST = :manifest - SESSION = :session - DIFF = :diff - FLY_POSTGRES = :fly_postgres - SUPABASE_POSTGRES = :supabase_postgres - UPSTASH_REDIS = :upstash_redis - TIGRIS_OBJECT_STORAGE = :tigris_object_storage - SENTRY = :sentry - DOCKER_IMAGE = :docker_image -end - -$counter = 0 -$counter_mutex = Mutex.new - -def id - $counter_mutex.synchronize do - $counter += 1 - $counter - end -end - -$start = Process.clock_gettime(Process::CLOCK_MONOTONIC) - -def elapsed - Process.clock_gettime(Process::CLOCK_MONOTONIC) - $start -end - -def nputs(type:, payload: nil) - obj = { id: id(), step: Step.current(), type: type, time: elapsed(), payload: payload }.compact - puts "#{LOG_PREFIX}#{obj.to_json}" -end - -# prefixed events -def event(name, meta = nil) - nputs(type: "event:#{name}", payload: meta) -end - -def artifact(name, body) - nputs(type: "artifact:#{name}", payload: body) -end - -def log(level, msg) - nputs(type: "log:#{level}", payload: msg) -end - -def info(msg) - log("info", msg) -end - -def debug(msg) - log("debug", msg) -end - -def error(msg) - log("error", msg) -end - -def exec_capture(cmd, display: nil, log: true) - cmd_display = display || cmd - event :exec, { cmd: cmd_display } - - out_mutex = Mutex.new - output = "" - - status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| - pid = wait_thr.pid - - stdin.close_write - - step = Step.current - - threads = [[stdout, "stdout"], [stderr, "stderr"]].map do |stream, stream_name| - Thread.new do - Step.set_current(step) # in_step would be a problem here, we just need to that the thread with the parent thread's step! - stream.each_line do |line| - if log - nputs type: stream_name, payload: line.chomp - end - out_mutex.synchronize { output += line } - end - end - end - - threads.each { |thr| thr.join } - - wait_thr.value - end - - if !status.success? - event :error, { type: :exec, message: "unsuccessful command '#{cmd_display}'", exit_code: status.exitstatus, pid: status.pid } - exit 1 - end - - output -end - -def in_step(step, &block) - old_step = Step.current() - Step.set_current(step) - event :start - ret = begin - yield block - rescue StandardError => e - event :error, { type: :uncaught, message: e } - exit 1 - end - event :end - Step.set_current(old_step) - ret -end - -def ts - Time.now.utc.iso8601(6) -end - -def get_env(name, default = nil) - value = ENV[name]&.strip - if value.nil? || value.empty? - return nil || default - end - value -end - -# start of actual logic +require './deploy/common' event :start, { ts: ts() } +# Change to a directory where we'll pull on git +Dir.chdir("/usr/src/app") + DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? DEPLOY_CUSTOMIZE = !get_env("NO_DEPLOY_CUSTOMIZE") +DEPLOY_ONLY = !get_env("DEPLOY_ONLY").nil? 
DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") if !DEPLOY_CUSTOMIZE && !DEPLOY_APP_NAME @@ -201,9 +45,19 @@ def get_env(name, default = nil) steps = [] +# Whatever happens, we try to git pull if a GIT_REPO is specified steps.push({id: Step::GIT_PULL, description: "Setup and pull from git repository"}) if GIT_REPO -steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) -steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) if DEPLOY_CUSTOMIZE + +if !DEPLOY_ONLY + # we're not just deploying, we're also `fly launch`-ing + steps.push({id: Step::PLAN, description: "Prepare deployment plan"}) + steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) if DEPLOY_CUSTOMIZE +else + # only deploying, so we need to send the artifacts right away + steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO + steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + artifact Artifact::META, { steps: steps } +end if GIT_REPO_URL in_step Step::GIT_PULL do @@ -229,179 +83,184 @@ def get_env(name, default = nil) end end -MANIFEST_PATH = "/tmp/manifest.json" +if !DEPLOY_ONLY + MANIFEST_PATH = "/tmp/manifest.json" -manifest = in_step Step::PLAN do - cmd = "flyctl launch plan propose --manifest-path #{MANIFEST_PATH}" + manifest = in_step Step::PLAN do + cmd = "flyctl launch plan propose --manifest-path #{MANIFEST_PATH}" - if (slug = DEPLOY_ORG_SLUG) - cmd += " --org #{slug}" - end + if (slug = DEPLOY_ORG_SLUG) + cmd += " --org #{slug}" + end - if (name = DEPLOY_APP_NAME) - cmd += " --force-name --name #{name}" - end + if (name = DEPLOY_APP_NAME) + cmd += " --force-name --name #{name}" + end - if (region = DEPLOY_APP_REGION) - cmd += " --region #{region}" - end + if (region = DEPLOY_APP_REGION) + cmd += " --region #{region}" + end - cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") - exec_capture(cmd).chomp + exec_capture(cmd).chomp - raw_manifest = File.read(MANIFEST_PATH) + raw_manifest = File.read(MANIFEST_PATH) - begin - manifest = JSON.parse(raw_manifest) - rescue StandardError => e - event :error, { type: :json, message: e, json: raw_manifest } - exit 1 - end + begin + manifest = JSON.parse(raw_manifest) + rescue StandardError => e + event :error, { type: :json, message: e, json: raw_manifest } + exit 1 + end - artifact Artifact::MANIFEST, manifest + artifact Artifact::MANIFEST, manifest - manifest -end + manifest + end -REQUIRES_DEPENDENCIES = %w[ruby bun node elixir python php] + REQUIRES_DEPENDENCIES = %w[ruby bun node elixir python php] -RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") -RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") + RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") + RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") -DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) -DO_GEN_REQS = !RUNTIME_LANGUAGE.empty? + DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) + DO_GEN_REQS = !RUNTIME_LANGUAGE.empty? 
-steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies", async: true}) if DO_INSTALL_DEPS + steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies", async: true}) if DO_INSTALL_DEPS -DEFAULT_ERLANG_VERSION = get_env("DEFAULT_ERLANG_VERSION", "26.2.5.2") + DEFAULT_ERLANG_VERSION = get_env("DEFAULT_ERLANG_VERSION", "26.2.5.2") -DEFAULT_RUNTIME_VERSIONS = { - "ruby" => get_env("DEFAULT_RUBY_VERSION", "3.1.6"), - "elixir" => get_env("DEFAULT_ELIXIR_VERSION", "1.16"), - "erlang" => DEFAULT_ERLANG_VERSION, - "node" => get_env("DEFAULT_NODE_VERSION", "20.16.0"), - "bun" => get_env("DEFAULT_BUN_VERSION", "1.1.24"), - "php" => get_env("DEFAULT_PHP_VERSION", "8.1"), - "python" => get_env("DEFAULT_PYTHON_VERSION", "3.12") -} + DEFAULT_RUNTIME_VERSIONS = { + "ruby" => get_env("DEFAULT_RUBY_VERSION", "3.1.6"), + "elixir" => get_env("DEFAULT_ELIXIR_VERSION", "1.16"), + "erlang" => DEFAULT_ERLANG_VERSION, + "node" => get_env("DEFAULT_NODE_VERSION", "20.16.0"), + "bun" => get_env("DEFAULT_BUN_VERSION", "1.1.24"), + "php" => get_env("DEFAULT_PHP_VERSION", "8.1"), + "python" => get_env("DEFAULT_PYTHON_VERSION", "3.12") + } -ASDF_SUPPORTED_FLYCTL_LANGUAGES = %w[ bun node elixir ] -FLYCTL_TO_ASDF_PLUGIN_NAME = { - "node" => "nodejs" -} + ASDF_SUPPORTED_FLYCTL_LANGUAGES = %w[ bun node elixir ] + FLYCTL_TO_ASDF_PLUGIN_NAME = { + "node" => "nodejs" + } -INSTALLABLE_PHP_VERSIONS = %w[ 5.6 7.0 7.1 7.2 7.3 7.4 8.0 8.1 8.2 8.3 8.4 ] + INSTALLABLE_PHP_VERSIONS = %w[ 5.6 7.0 7.1 7.2 7.3 7.4 8.0 8.1 8.2 8.3 8.4 ] -deps_thread = Thread.new do - if DO_INSTALL_DEPS - in_step Step::INSTALL_DEPENDENCIES do - # get the version - version = DEFAULT_RUNTIME_VERSIONS[RUNTIME_LANGUAGE] - if version.nil? - event :error, { type: :unsupported_version, message: "unhandled runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } - exit 1 - end + deps_thread = Thread.new do + if DO_INSTALL_DEPS + in_step Step::INSTALL_DEPENDENCIES do + # get the version + version = DEFAULT_RUNTIME_VERSIONS[RUNTIME_LANGUAGE] + if version.nil? + event :error, { type: :unsupported_version, message: "unhandled runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } + exit 1 + end - version = RUNTIME_VERSION.empty? ? version : RUNTIME_VERSION + version = RUNTIME_VERSION.empty? ? 
version : RUNTIME_VERSION - if ASDF_SUPPORTED_FLYCTL_LANGUAGES.include?(RUNTIME_LANGUAGE) - plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) - if plugin == "elixir" - # required for elixir to work - exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") - end - exec_capture("asdf install #{plugin} #{version}") - else - case RUNTIME_LANGUAGE - when "ruby" - exec_capture("rvm install #{version}") - when "php" - major, minor = Gem::Version.new(version).segments - php_version = "#{major}.#{minor}" - if !INSTALLABLE_PHP_VERSIONS.include?(php_version) - event :error, { type: :unsupported_version, message: "unsupported PHP version #{version}, supported versions are: #{INSTALLABLE_PHP_VERSIONS.join(", ")}" } - exit 1 + if ASDF_SUPPORTED_FLYCTL_LANGUAGES.include?(RUNTIME_LANGUAGE) + plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) + if plugin == "elixir" + # required for elixir to work + exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") end - exec_capture("apt install --no-install-recommends -y php#{php_version} php#{php_version}-curl php#{php_version}-mbstring php#{php_version}-xml") - exec_capture("curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php") - # TODO: verify signature? - exec_capture("php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer") - when "python" - major, minor = Gem::Version.new(version).segments - python_version = "#{major}.#{minor}" - exec_capture("mise use -g python@#{python_version}") + exec_capture("asdf install #{plugin} #{version}") else - # we should never get here, but handle it in case! - event :error, { type: :unsupported_version, message: "no handler for runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } - exit 1 + case RUNTIME_LANGUAGE + when "ruby" + exec_capture("rvm install #{version}") + when "php" + major, minor = Gem::Version.new(version).segments + php_version = "#{major}.#{minor}" + if !INSTALLABLE_PHP_VERSIONS.include?(php_version) + event :error, { type: :unsupported_version, message: "unsupported PHP version #{version}, supported versions are: #{INSTALLABLE_PHP_VERSIONS.join(", ")}" } + exit 1 + end + exec_capture("apt install --no-install-recommends -y php#{php_version} php#{php_version}-curl php#{php_version}-mbstring php#{php_version}-xml") + exec_capture("curl -sS https://getcomposer.org/installer -o /tmp/composer-setup.php") + # TODO: verify signature? + exec_capture("php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer") + when "python" + major, minor = Gem::Version.new(version).segments + python_version = "#{major}.#{minor}" + exec_capture("mise use -g python@#{python_version}") + else + # we should never get here, but handle it in case! 
+ event :error, { type: :unsupported_version, message: "no handler for runtime: #{RUNTIME_LANGUAGE}, supported: #{DEFAULT_RUNTIME_VERSIONS.keys.join(", ")}" } + exit 1 + end end end end end -end -if DEPLOY_CUSTOMIZE - manifest = in_step Step::CUSTOMIZE do - cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH} --from-manifest #{MANIFEST_PATH}" + if DEPLOY_CUSTOMIZE + manifest = in_step Step::CUSTOMIZE do + cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH} --from-manifest #{MANIFEST_PATH}" - exec_capture(cmd) - session = JSON.parse(File.read("/tmp/session.json")) + exec_capture(cmd) + session = JSON.parse(File.read("/tmp/session.json")) - artifact Artifact::SESSION, session + artifact Artifact::SESSION, session - cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH}" + cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH}" - exec_capture(cmd) - manifest = JSON.parse(File.read("/tmp/manifest.json")) + exec_capture(cmd) + manifest = JSON.parse(File.read("/tmp/manifest.json")) - artifact Artifact::MANIFEST, manifest + artifact Artifact::MANIFEST, manifest - manifest + manifest + end end -end -# Write the fly config file to a tmp directory -File.write("/tmp/fly.json", manifest["config"].to_json) + # Write the fly config file to a tmp directory + File.write("/tmp/fly.json", manifest["config"].to_json) -APP_NAME = manifest["config"]["app"] -ORG_SLUG = manifest["plan"]["org"] -APP_REGION = manifest["plan"]["region"] + ORG_SLUG = manifest["plan"]["org"] + APP_REGION = manifest["plan"]["region"] -FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") -SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") -UPSTASH = manifest.dig("plan", "redis", "upstash_redis") -TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") -SENTRY = manifest.dig("plan", "sentry") == true + FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") + SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") + UPSTASH = manifest.dig("plan", "redis", "upstash_redis") + TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") + SENTRY = manifest.dig("plan", "sentry") == true -steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS -steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO -steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG -steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE -steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH -steps.push({id: Step::TIGRIS_OBJECT_STORAGE, description: "Create Tigris object storage bucket"}) if TIGRIS -steps.push({id: Step::SENTRY, description: "Create Sentry project"}) if SENTRY + steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS + steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO + steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG + steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE + steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH + steps.push({id: 
Step::TIGRIS_OBJECT_STORAGE, description: "Create Tigris object storage bucket"}) if TIGRIS + steps.push({id: Step::SENTRY, description: "Create Sentry project"}) if SENTRY -steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW -artifact Artifact::META, { steps: steps } + artifact Artifact::META, { steps: steps } -# Join the parallel task thread -deps_thread.join + # Join the parallel task thread + deps_thread.join -if DO_GEN_REQS - in_step Step::GENERATE_BUILD_REQUIREMENTS do - exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") - exec_capture("git add -A", log: false) - diff = exec_capture("git diff --cached", log: false) - artifact Artifact::DIFF, { output: diff } + if DO_GEN_REQS + in_step Step::GENERATE_BUILD_REQUIREMENTS do + exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") + exec_capture("git add -A", log: false) + diff = exec_capture("git diff --cached", log: false) + artifact Artifact::DIFF, { output: diff } + end end end -image_tag = SecureRandom.hex(16) +# TODO: better error if missing config +fly_config = manifest && manifest.dig("config") || JSON.parse(exec_capture("flyctl config show --local", log: false)) + +APP_NAME = DEPLOY_APP_NAME || fly_config["app"] image_ref = in_step Step::BUILD do - if (image_ref = manifest.dig("config","build","image")&.strip) && !image_ref.nil? && !image_ref.empty? + image_tag = SecureRandom.hex(16) + if (image_ref = fly_config.dig("build","image")&.strip) && !image_ref.nil? && !image_ref.empty? info("Skipping build, using image defined in fly config: #{image_ref}") image_ref else @@ -413,13 +272,13 @@ def get_env(name, default = nil) end end -if get_env("SKIP_EXTENSIONS").nil? +if DEPLOY_ONLY || get_env("SKIP_EXTENSIONS").nil? 
if FLY_PG in_step Step::FLY_POSTGRES_CREATE do pg_name = FLY_PG["app_name"] region = APP_REGION - cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region} --yes" + cmd = "flyctl pg create --flex --org #{ORG_SLUG} --name #{pg_name} --region #{region}" if (vm_size = FLY_PG["vm_size"]) cmd += " --vm-size #{vm_size}" @@ -457,7 +316,7 @@ def get_env(name, default = nil) in_step Step::UPSTASH_REDIS do db_name = "#{APP_NAME}-redis" - cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION} --yes" + cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION}" if UPSTASH["eviction"] == true cmd += " --enable-eviction" diff --git a/deploy/common.rb b/deploy/common.rb new file mode 100644 index 0000000000..eea2436024 --- /dev/null +++ b/deploy/common.rb @@ -0,0 +1,159 @@ +require 'json' +require 'time' +require 'open3' +require 'uri' +require 'securerandom' +require 'fileutils' + +LOG_PREFIX = ENV["LOG_PREFIX"] + +module Step + ROOT = :__root__ + GIT_PULL = :git_pull + PLAN = :plan + CUSTOMIZE = :customize + INSTALL_DEPENDENCIES = :install_dependencies + GENERATE_BUILD_REQUIREMENTS = :generate_build_requirements + BUILD = :build + FLY_POSTGRES_CREATE = :fly_postgres_create + SUPABASE_POSTGRES = :supabase_postgres + UPSTASH_REDIS = :upstash_redis + TIGRIS_OBJECT_STORAGE = :tigris_object_storage + SENTRY = :sentry + DEPLOY = :deploy + + def self.current + Thread.current[:step] ||= Step::ROOT + end + + def self.set_current(step) + Thread.current[:step] = step + end +end + +module Artifact + META = :meta + GIT_INFO = :git_info + GIT_HEAD = :git_head + MANIFEST = :manifest + SESSION = :session + DIFF = :diff + FLY_POSTGRES = :fly_postgres + SUPABASE_POSTGRES = :supabase_postgres + UPSTASH_REDIS = :upstash_redis + TIGRIS_OBJECT_STORAGE = :tigris_object_storage + SENTRY = :sentry + DOCKER_IMAGE = :docker_image +end + +$counter = 0 +$counter_mutex = Mutex.new + +def id + $counter_mutex.synchronize do + $counter += 1 + $counter + end +end + +$start = Process.clock_gettime(Process::CLOCK_MONOTONIC) + +def elapsed + Process.clock_gettime(Process::CLOCK_MONOTONIC) - $start +end + +def nputs(type:, payload: nil) + obj = { id: id(), step: Step.current(), type: type, time: elapsed(), payload: payload }.compact + puts "#{LOG_PREFIX}#{obj.to_json}" +end + +# prefixed events +def event(name, meta = nil) + nputs(type: "event:#{name}", payload: meta) +end + +def artifact(name, body) + nputs(type: "artifact:#{name}", payload: body) +end + +def log(level, msg) + nputs(type: "log:#{level}", payload: msg) +end + +def info(msg) + log("info", msg) +end + +def debug(msg) + log("debug", msg) +end + +def error(msg) + log("error", msg) +end + +def exec_capture(cmd, display: nil, log: true) + cmd_display = display || cmd + event :exec, { cmd: cmd_display } + + out_mutex = Mutex.new + output = "" + + status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| + pid = wait_thr.pid + + stdin.close_write + + step = Step.current + + threads = [[stdout, "stdout"], [stderr, "stderr"]].map do |stream, stream_name| + Thread.new do + Step.set_current(step) # in_step would be a problem here, we just need to that the thread with the parent thread's step! + stream.each_line do |line| + if log + nputs type: stream_name, payload: line.chomp + end + out_mutex.synchronize { output += line } + end + end + end + + threads.each { |thr| thr.join } + + wait_thr.value + end + + if !status.success? 
+ event :error, { type: :exec, message: "unsuccessful command '#{cmd_display}'", exit_code: status.exitstatus, pid: status.pid } + exit 1 + end + + output +end + +def in_step(step, &block) + old_step = Step.current() + Step.set_current(step) + event :start + ret = begin + yield block + rescue StandardError => e + event :error, { type: :uncaught, message: e } + exit 1 + end + event :end + Step.set_current(old_step) + ret +end + +def ts + Time.now.utc.iso8601(6) +end + +def get_env(name, default = nil) + value = ENV[name]&.strip + if value.nil? || value.empty? + return nil || default + end + value +end diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 2b98e61179..9aa442051c 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -60,8 +60,9 @@ ENV MIX_ENV=dev COPY bin/flyctl /usr/local/bin/flyctl COPY deploy.rb /deploy.rb +COPY deploy /deploy -WORKDIR /usr/src/app +RUN mkdir -p /usr/src/app # need a login shell for rvm to work properly... ENTRYPOINT ["/bin/bash", "-lc"] diff --git a/deployer.Dockerfile.dockerignore b/deployer.Dockerfile.dockerignore index 919e86fa52..03767f328c 100644 --- a/deployer.Dockerfile.dockerignore +++ b/deployer.Dockerfile.dockerignore @@ -1,4 +1,5 @@ * !/bin/flyctl -!deploy.rb \ No newline at end of file +!deploy.rb +!deploy \ No newline at end of file From fae836f9f0e5a153fedea97c8e3f8713d4c426e6 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 13 Sep 2024 14:14:17 -0400 Subject: [PATCH 029/104] fix conditional to skip extensions when using DEPLOY_ONLY --- deploy.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy.rb b/deploy.rb index ef9342de16..91dd59a8d9 100755 --- a/deploy.rb +++ b/deploy.rb @@ -272,7 +272,7 @@ end end -if DEPLOY_ONLY || get_env("SKIP_EXTENSIONS").nil? +if !DEPLOY_ONLY && get_env("SKIP_EXTENSIONS").nil? if FLY_PG in_step Step::FLY_POSTGRES_CREATE do pg_name = FLY_PG["app_name"] From 65e4ff40c907479797dd5f91642a4258b22d2de0 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 13 Sep 2024 15:03:06 -0400 Subject: [PATCH 030/104] trying to make the deployer testable via the CI --- .github/workflows/preflight.yml | 9 ++- test/preflight/deployer_test.go | 119 +++++++++++++++++++++++++++++ test/preflight/testlib/test_env.go | 4 + 3 files changed, 130 insertions(+), 2 deletions(-) create mode 100644 test/preflight/deployer_test.go diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index fe9412a79b..51bd3c1cc7 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -37,6 +37,13 @@ jobs: with: name: flyctl path: master-build + - name: Move flyctl binary to correct directory + run: | + mv master-build/flyctl bin/flyctl + chmod +x bin/flyctl + - name: Build deployer image + run: | + docker build -t fly-deployer -f deployer.Dockerfile . 
- name: Run preflight tests id: preflight env: @@ -48,8 +55,6 @@ jobs: FLY_FORCE_TRACE: "true" FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} run: | - mv master-build/flyctl bin/flyctl - chmod +x bin/flyctl export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT ./scripts/preflight.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go new file mode 100644 index 0000000000..a782226f49 --- /dev/null +++ b/test/preflight/deployer_test.go @@ -0,0 +1,119 @@ +//go:build integration +// +build integration + +package preflight + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "os" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/stretchr/testify/require" + "github.com/superfly/flyctl/test/preflight/testlib" +) + +func TestDeployerDockerfile(t *testing.T) { + dockerClient, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + f := testlib.NewTestEnvFromEnv(t) + + err = copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + require.NoError(t, err) + + flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) + + appName := f.CreateRandomAppName() + require.NotEmpty(t, appName) + + err = testlib.OverwriteConfig(flyTomlPath, map[string]any{ + "app": appName, + "region": f.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": f.ID(), + }, + }) + require.NoError(t, err) + + // app required + f.Fly("apps create %s -o %s", appName, f.OrgSlug()) + + ctx := context.TODO() + + fmt.Println("creating container...") + cont, err := dockerClient.ContainerCreate(ctx, &container.Config{ + Hostname: "deployer", + Image: "fly-deployer", + Env: []string{ + fmt.Sprintf("FLY_API_TOKEN=%s", f.AccessToken()), + fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), + "DEPLOY_ONLY=1", + }, + }, &container.HostConfig{ + RestartPolicy: container.RestartPolicy{ + Name: "never", + }, + Binds: []string{fmt.Sprintf("%s:/usr/src/app", f.WorkDir())}, + }, &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{}, + }, nil, fmt.Sprintf("deployer-%s", appName)) + + if err != nil { + panic(err) + } + + logs, err := dockerClient.ContainerLogs(context.Background(), cont.ID, container.LogsOptions{ + ShowStderr: true, + ShowStdout: true, + Timestamps: false, + Follow: true, + Tail: "40", + }) + if err != nil { + panic(err) + } + + defer logs.Close() + + fmt.Println("starting container...") + err = dockerClient.ContainerStart(ctx, cont.ID, container.StartOptions{}) + if err != nil { + panic(err) + } + + fmt.Printf("Container %s is created\n", cont.ID) + + defer dockerClient.ContainerRemove(ctx, cont.ID, container.RemoveOptions{ + RemoveVolumes: true, + RemoveLinks: true, + Force: true, + }) + + hdr := make([]byte, 8) + for { + _, err = logs.Read(hdr) + if err != nil { + panic(err) + } + var w io.Writer + switch hdr[0] { + case 1: + w = os.Stdout + default: + w = os.Stderr + } + count := binary.BigEndian.Uint32(hdr[4:]) + dat := make([]byte, count) + _, err = logs.Read(dat) + fmt.Fprint(w, string(dat)) + } + +} diff --git a/test/preflight/testlib/test_env.go b/test/preflight/testlib/test_env.go index f9642b23b9..8bf3a76133 100644 --- a/test/preflight/testlib/test_env.go +++ b/test/preflight/testlib/test_env.go @@ -63,6 +63,10 @@ func (f *FlyctlTestEnv) OtherRegions() []string { return f.otherRegions } +func (f *FlyctlTestEnv) 
AccessToken() string { + return f.originalAccessToken +} + // Great name I know func NewTestEnvFromEnvWithEnv(t testing.TB, envVariables map[string]string) *FlyctlTestEnv { tempDir := socketSafeTempDir(t) From 5c83bc732e44f99b205a97433bf97ae181a1dafa Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 13 Sep 2024 15:27:43 -0400 Subject: [PATCH 031/104] use the correct restart policy --- test/preflight/deployer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index a782226f49..f082d28607 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -59,7 +59,7 @@ func TestDeployerDockerfile(t *testing.T) { }, }, &container.HostConfig{ RestartPolicy: container.RestartPolicy{ - Name: "never", + Name: container.RestartPolicyDisabled, }, Binds: []string{fmt.Sprintf("%s:/usr/src/app", f.WorkDir())}, }, &network.NetworkingConfig{ From 225ceb877db9bf20e47c93bd6dfc13bb0547588b Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 13 Sep 2024 16:05:24 -0400 Subject: [PATCH 032/104] on eof, don't panic, try to attach stdout and stderr --- test/preflight/deployer_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index f082d28607..3fd454ebf3 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -6,6 +6,7 @@ package preflight import ( "context" "encoding/binary" + "errors" "fmt" "io" "os" @@ -57,6 +58,8 @@ func TestDeployerDockerfile(t *testing.T) { fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), "DEPLOY_ONLY=1", }, + AttachStdout: true, + AttachStderr: true, }, &container.HostConfig{ RestartPolicy: container.RestartPolicy{ Name: container.RestartPolicyDisabled, @@ -101,6 +104,10 @@ func TestDeployerDockerfile(t *testing.T) { for { _, err = logs.Read(hdr) if err != nil { + if errors.Is(err, io.EOF) { + fmt.Println("EOF!") + break + } panic(err) } var w io.Writer From 9e1c271074f8facfe62589b366eb947a1d2026fb Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 07:53:15 -0400 Subject: [PATCH 033/104] attempt to wait for container --- test/preflight/deployer_test.go | 78 ++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 17 deletions(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 3fd454ebf3..73bde86480 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -100,27 +100,71 @@ func TestDeployerDockerfile(t *testing.T) { Force: true, }) - hdr := make([]byte, 8) - for { - _, err = logs.Read(hdr) - if err != nil { - if errors.Is(err, io.EOF) { - fmt.Println("EOF!") - break + waitCh, waitErrCh := dockerClient.ContainerWait(ctx, cont.ID, container.WaitConditionNotRunning) + + logCh := make(chan *log) + + go func() { + + hdr := make([]byte, 8) + for { + _, err = logs.Read(hdr) + if err != nil { + if errors.Is(err, io.EOF) { + fmt.Println("EOF!") + logCh <- nil + break + } + panic(err) } - panic(err) + + count := binary.BigEndian.Uint32(hdr[4:]) + dat := make([]byte, count) + _, err = logs.Read(dat) + + logCh <- &log{stream: hdr[0], data: dat} } - var w io.Writer - switch hdr[0] { - case 1: - w = os.Stdout + + }() + + logDone := false + exited := false + var exitCode int64 + var exitError error + + for { + select { + case l := <-logCh: + logDone = l == nil + if !logDone { + var w io.Writer + switch l.stream { + case 1: + w = os.Stdout + default: + w = os.Stderr + 
} + + fmt.Fprint(w, string(l.data)) + } + case w := <-waitCh: + exited = true + exitCode = w.StatusCode + exitError = errors.New(w.Error.Message) + case we := <-waitErrCh: + exited = true + exitError = we default: - w = os.Stderr + if exited && logDone { + fmt.Printf("container done, code: %d, error: %+v\n", exitCode, exitError) + break + } } - count := binary.BigEndian.Uint32(hdr[4:]) - dat := make([]byte, count) - _, err = logs.Read(dat) - fmt.Fprint(w, string(dat)) } } + +type log struct { + stream uint8 + data []byte +} From 2bfae94033dba63c2299ac42f26368c22c063dce Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 08:17:19 -0400 Subject: [PATCH 034/104] check for nil error from container wait channel --- test/preflight/deployer_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 73bde86480..1635fe80bf 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -150,7 +150,9 @@ func TestDeployerDockerfile(t *testing.T) { case w := <-waitCh: exited = true exitCode = w.StatusCode - exitError = errors.New(w.Error.Message) + if w.Error != nil { + exitError = errors.New(w.Error.Message) + } case we := <-waitErrCh: exited = true exitError = we From ad9f3cee8a22949056703dec093703bacb94bbf6 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 08:30:47 -0400 Subject: [PATCH 035/104] attempt not to loop endlessly when everything is done --- test/preflight/deployer_test.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 1635fe80bf..a0892564ac 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -133,6 +133,10 @@ func TestDeployerDockerfile(t *testing.T) { var exitError error for { + if exited && logDone { + fmt.Printf("container done, code: %d, error: %+v\n", exitCode, exitError) + break + } select { case l := <-logCh: logDone = l == nil @@ -156,11 +160,6 @@ func TestDeployerDockerfile(t *testing.T) { case we := <-waitErrCh: exited = true exitError = we - default: - if exited && logDone { - fmt.Printf("container done, code: %d, error: %+v\n", exitCode, exitError) - break - } } } From c438f89787a3c8b89652381523eaf05ace1627d2 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 08:32:09 -0400 Subject: [PATCH 036/104] write all to stdout --- test/preflight/deployer_test.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index a0892564ac..11c8d339c3 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "io" - "os" "testing" "github.com/docker/docker/api/types/container" @@ -141,15 +140,15 @@ func TestDeployerDockerfile(t *testing.T) { case l := <-logCh: logDone = l == nil if !logDone { - var w io.Writer - switch l.stream { - case 1: - w = os.Stdout - default: - w = os.Stderr - } - - fmt.Fprint(w, string(l.data)) + // var w io.Writer + // switch l.stream { + // case 1: + // w = os.Stdout + // default: + // w = os.Stderr + // } + + fmt.Printf(string(l.data)) } case w := <-waitCh: exited = true From 337096edf9fe3cf478703bfde17f9a558299af3a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 10:15:44 -0400 Subject: [PATCH 037/104] build just once --- .github/workflows/build.yml | 13 
+++++++++++++ .github/workflows/preflight.yml | 4 +--- test/preflight/deployer_test.go | 6 +++++- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 90304afe48..aaed8a78c7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,6 +34,19 @@ jobs: name: flyctl path: dist/default_linux_amd64_v1/flyctl overwrite: true + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: https://index.docker.io/v1/ + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Deployer docker build + run: | + mkdir -p bin + cp dist/default_linux_amd64_v1/flyctl bin/flyctl + chmod +x bin/flyctl + docker build -t flyio/deployer:${{ github.ref }} -f deployer.Dockerfile . + docker push flyio/deployer:${{ github.ref }} preflight: needs: test_build diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index 51bd3c1cc7..2f5042325b 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -41,9 +41,6 @@ jobs: run: | mv master-build/flyctl bin/flyctl chmod +x bin/flyctl - - name: Build deployer image - run: | - docker build -t fly-deployer -f deployer.Dockerfile . - name: Run preflight tests id: preflight env: @@ -54,6 +51,7 @@ jobs: FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" FLY_FORCE_TRACE: "true" FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} + FLY_DEPLOYER_IMAGE: "flyio/deployer:${{ github.ref }}" run: | export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 11c8d339c3..1599a80751 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "os" "testing" "github.com/docker/docker/api/types/container" @@ -51,7 +52,7 @@ func TestDeployerDockerfile(t *testing.T) { fmt.Println("creating container...") cont, err := dockerClient.ContainerCreate(ctx, &container.Config{ Hostname: "deployer", - Image: "fly-deployer", + Image: os.Getenv("FLY_DEPLOYER_IMAGE"), Env: []string{ fmt.Sprintf("FLY_API_TOKEN=%s", f.AccessToken()), fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), @@ -162,6 +163,9 @@ func TestDeployerDockerfile(t *testing.T) { } } + require.Nil(t, exitError) + require.Zero(t, exitCode) + } type log struct { From 97d67a603f5035367e5a93a2395baba776001c1a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 10:27:27 -0400 Subject: [PATCH 038/104] I wanted github.sha to tag the docker image, not github.ref --- .github/workflows/build.yml | 4 ++-- .github/workflows/preflight.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aaed8a78c7..774255d4fe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -45,8 +45,8 @@ jobs: mkdir -p bin cp dist/default_linux_amd64_v1/flyctl bin/flyctl chmod +x bin/flyctl - docker build -t flyio/deployer:${{ github.ref }} -f deployer.Dockerfile . - docker push flyio/deployer:${{ github.ref }} + docker build -t flyio/deployer:${{ github.sha }} -f deployer.Dockerfile . 
+ docker push flyio/deployer:${{ github.sha }} preflight: needs: test_build diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index 2f5042325b..6fa1279724 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -51,7 +51,7 @@ jobs: FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" FLY_FORCE_TRACE: "true" FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} - FLY_DEPLOYER_IMAGE: "flyio/deployer:${{ github.ref }}" + FLY_DEPLOYER_IMAGE: "flyio/deployer:${{ github.sha }}" run: | export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT From 155aa3282f90426d7f230c9d24e81d8bbc17c636 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 10:47:30 -0400 Subject: [PATCH 039/104] first, pull the deployer image --- test/preflight/deployer_test.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 1599a80751..5547a0e5f7 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -13,6 +13,7 @@ import ( "testing" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" "github.com/stretchr/testify/require" @@ -49,10 +50,27 @@ func TestDeployerDockerfile(t *testing.T) { ctx := context.TODO() + imageRef := os.Getenv("FLY_DEPLOYER_IMAGE") + require.NotEmpty(t, imageRef) + + fmt.Println("pulling image...") + out, err := dockerClient.ImagePull(ctx, imageRef, image.PullOptions{Platform: "linux/amd64"}) + if err != nil { + panic(err) + } + + defer out.Close() + + _, err = io.Copy(os.Stdout, out) + if err != nil { + // TODO: fatal? + fmt.Printf("error copying image pull io: %v\n", err) + } + fmt.Println("creating container...") cont, err := dockerClient.ContainerCreate(ctx, &container.Config{ Hostname: "deployer", - Image: os.Getenv("FLY_DEPLOYER_IMAGE"), + Image: imageRef, Env: []string{ fmt.Sprintf("FLY_API_TOKEN=%s", f.AccessToken()), fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), From c69aa9335ede95312e2843e357eaa79b3cc72e38 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 13:35:34 -0400 Subject: [PATCH 040/104] upgrade docker module, successfully build and deploy the deploy-node fixture --- .github/workflows/build.yml | 2 +- deploy.rb | 9 +- go.mod | 3 +- go.sum | 4 + internal/build/imgsrc/dockerfile_builder.go | 3 +- internal/build/imgsrc/local_image_resolver.go | 3 +- test/preflight/deployer_test.go | 91 +++++++++++-------- 7 files changed, 67 insertions(+), 48 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 774255d4fe..b3b0952303 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -40,7 +40,7 @@ jobs: registry: https://index.docker.io/v1/ username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - - name: Deployer docker build + - name: Deployer docker build & push run: | mkdir -p bin cp dist/default_linux_amd64_v1/flyctl bin/flyctl diff --git a/deploy.rb b/deploy.rb index 91dd59a8d9..a7d35aa10e 100755 --- a/deploy.rb +++ b/deploy.rb @@ -83,6 +83,9 @@ end end +# -c arg if any +conf_arg = "" + if !DEPLOY_ONLY MANIFEST_PATH = "/tmp/manifest.json" @@ -218,6 +221,7 @@ # Write the fly config file to a tmp directory File.write("/tmp/fly.json", manifest["config"].to_json) + conf_arg = "-c /tmp/fly.json" ORG_SLUG = manifest["plan"]["org"] APP_REGION = 
manifest["plan"]["region"] @@ -255,7 +259,6 @@ # TODO: better error if missing config fly_config = manifest && manifest.dig("config") || JSON.parse(exec_capture("flyctl config show --local", log: false)) - APP_NAME = DEPLOY_APP_NAME || fly_config["app"] image_ref = in_step Step::BUILD do @@ -266,7 +269,7 @@ else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} -c /tmp/fly.json --image-label #{image_tag}") + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} #{conf_arg} --image-label #{image_tag}") artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end @@ -369,7 +372,7 @@ if DEPLOY_NOW in_step Step::DEPLOY do - exec_capture("flyctl deploy -a #{APP_NAME} -c /tmp/fly.json --image #{image_ref}") + exec_capture("flyctl deploy -a #{APP_NAME} #{conf_arg} --image #{image_ref}") end end diff --git a/go.mod b/go.mod index afac8d559a..e478ce001c 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/cli/safeexec v1.0.1 github.com/coder/websocket v1.8.12 github.com/depot/depot-go v0.3.0 - github.com/docker/docker v26.1.4+incompatible + github.com/docker/docker v27.2.1+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 @@ -211,6 +211,7 @@ require ( github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/signal v0.7.0 // indirect github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/nats-io/nkeys v0.4.7 // indirect diff --git a/go.sum b/go.sum index 0709148cfe..6013681aa7 100644 --- a/go.sum +++ b/go.sum @@ -231,6 +231,8 @@ github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBi github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.2.1+incompatible h1:fQdiLfW7VLscyoeYEBz7/J8soYFDZV1u6VW6gJEjNMI= +github.com/docker/docker v27.2.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -471,6 +473,8 @@ github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/internal/build/imgsrc/dockerfile_builder.go 
b/internal/build/imgsrc/dockerfile_builder.go index d94ccc31e5..98ab4891b9 100644 --- a/internal/build/imgsrc/dockerfile_builder.go +++ b/internal/build/imgsrc/dockerfile_builder.go @@ -15,6 +15,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/system" dockerclient "github.com/docker/docker/client" "github.com/docker/docker/pkg/jsonmessage" @@ -546,7 +547,7 @@ func pushToFly(ctx context.Context, docker *dockerclient.Client, streams *iostre metrics.Started(ctx, "image_push") sendImgPushMetrics := metrics.StartTiming(ctx, "image_push/duration") - pushResp, err := docker.ImagePush(ctx, tag, types.ImagePushOptions{ + pushResp, err := docker.ImagePush(ctx, tag, image.PushOptions{ RegistryAuth: flyRegistryAuth(config.Tokens(ctx).Docker()), }) metrics.Status(ctx, "image_push", err == nil) diff --git a/internal/build/imgsrc/local_image_resolver.go b/internal/build/imgsrc/local_image_resolver.go index 5fad3782af..03b22e0bb4 100644 --- a/internal/build/imgsrc/local_image_resolver.go +++ b/internal/build/imgsrc/local_image_resolver.go @@ -6,7 +6,6 @@ import ( "regexp" "strings" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" dockerclient "github.com/docker/docker/client" dockerparser "github.com/novln/docker-parser" @@ -132,7 +131,7 @@ func findImageWithDocker(ctx context.Context, d *dockerclient.Client, imageName isID := imageIDPattern.MatchString(imageName) - images, err := d.ImageList(ctx, types.ImageListOptions{}) + images, err := d.ImageList(ctx, image.ListOptions{}) if err != nil { tracing.RecordError(span, err, "failed to list images") return nil, err diff --git a/test/preflight/deployer_test.go b/test/preflight/deployer_test.go index 5547a0e5f7..74d0b4c53a 100644 --- a/test/preflight/deployer_test.go +++ b/test/preflight/deployer_test.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "testing" @@ -16,15 +17,17 @@ import ( "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/require" "github.com/superfly/flyctl/test/preflight/testlib" ) func TestDeployerDockerfile(t *testing.T) { - dockerClient, err := client.NewClientWithOpts(client.FromEnv) + dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { panic(err) } + defer dockerClient.Close() f := testlib.NewTestEnvFromEnv(t) @@ -45,6 +48,10 @@ func TestDeployerDockerfile(t *testing.T) { }) require.NoError(t, err) + flytoml, err := ioutil.ReadFile(flyTomlPath) + require.NoError(t, err) + fmt.Printf("FLY TOML:\n%s\n", string(flytoml)) + // app required f.Fly("apps create %s -o %s", appName, f.OrgSlug()) @@ -53,40 +60,57 @@ func TestDeployerDockerfile(t *testing.T) { imageRef := os.Getenv("FLY_DEPLOYER_IMAGE") require.NotEmpty(t, imageRef) - fmt.Println("pulling image...") - out, err := dockerClient.ImagePull(ctx, imageRef, image.PullOptions{Platform: "linux/amd64"}) - if err != nil { - panic(err) - } + if os.Getenv("FLY_DEPLOYER_IMAGE_NO_PULL") == "" { + fmt.Println("pulling image...") + out, err := dockerClient.ImagePull(ctx, imageRef, image.PullOptions{Platform: "linux/amd64"}) + if err != nil { + panic(err) + } - defer out.Close() + defer out.Close() - _, err = io.Copy(os.Stdout, out) - if err != nil { - // TODO: fatal? 
- fmt.Printf("error copying image pull io: %v\n", err) + _, err = io.Copy(os.Stdout, out) + if err != nil { + // TODO: fatal? + fmt.Printf("error copying image pull io: %v\n", err) + } } - fmt.Println("creating container...") + fmt.Printf("creating container... binding /usr/src/app to %s\n", f.WorkDir()) cont, err := dockerClient.ContainerCreate(ctx, &container.Config{ - Hostname: "deployer", - Image: imageRef, + Image: imageRef, Env: []string{ fmt.Sprintf("FLY_API_TOKEN=%s", f.AccessToken()), fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), "DEPLOY_ONLY=1", + "DEPLOY_NOW=1", }, - AttachStdout: true, - AttachStderr: true, + Tty: false, }, &container.HostConfig{ RestartPolicy: container.RestartPolicy{ Name: container.RestartPolicyDisabled, }, - Binds: []string{fmt.Sprintf("%s:/usr/src/app", f.WorkDir())}, - }, &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{}, - }, nil, fmt.Sprintf("deployer-%s", appName)) + Binds: []string{fmt.Sprintf("%s:/usr/src/app", f.WorkDir())}, + NetworkMode: network.NetworkHost, + }, nil, &v1.Platform{ + Architecture: "amd64", + OS: "linux", + }, fmt.Sprintf("deployer-%s", appName)) + + if err != nil { + panic(err) + } + + fmt.Printf("Container %s is created\n", cont.ID) + + defer dockerClient.ContainerRemove(ctx, cont.ID, container.RemoveOptions{ + RemoveVolumes: true, + RemoveLinks: true, + Force: true, + }) + fmt.Println("starting container...") + err = dockerClient.ContainerStart(ctx, cont.ID, container.StartOptions{}) if err != nil { panic(err) } @@ -94,9 +118,7 @@ func TestDeployerDockerfile(t *testing.T) { logs, err := dockerClient.ContainerLogs(context.Background(), cont.ID, container.LogsOptions{ ShowStderr: true, ShowStdout: true, - Timestamps: false, Follow: true, - Tail: "40", }) if err != nil { panic(err) @@ -104,32 +126,18 @@ func TestDeployerDockerfile(t *testing.T) { defer logs.Close() - fmt.Println("starting container...") - err = dockerClient.ContainerStart(ctx, cont.ID, container.StartOptions{}) - if err != nil { - panic(err) - } - - fmt.Printf("Container %s is created\n", cont.ID) - - defer dockerClient.ContainerRemove(ctx, cont.ID, container.RemoveOptions{ - RemoveVolumes: true, - RemoveLinks: true, - Force: true, - }) - waitCh, waitErrCh := dockerClient.ContainerWait(ctx, cont.ID, container.WaitConditionNotRunning) logCh := make(chan *log) go func() { - hdr := make([]byte, 8) for { - _, err = logs.Read(hdr) + _, err := logs.Read(hdr) + // fmt.Printf("read %d bytes of logs\n", n) if err != nil { if errors.Is(err, io.EOF) { - fmt.Println("EOF!") + // fmt.Println("EOF!") logCh <- nil break } @@ -142,7 +150,6 @@ func TestDeployerDockerfile(t *testing.T) { logCh <- &log{stream: hdr[0], data: dat} } - }() logDone := false @@ -184,6 +191,10 @@ func TestDeployerDockerfile(t *testing.T) { require.Nil(t, exitError) require.Zero(t, exitCode) + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Contains(t, string(body), fmt.Sprintf("Hello, World! 
%s", f.ID())) } type log struct { From 79413c0d1d38f9a1642f6d47278ed842cfd97364 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 16 Sep 2024 16:55:33 -0400 Subject: [PATCH 041/104] moved a few tests around, start parsing logs and testing the deployer's output --- deploy.rb | 2 +- test/{preflight => deployer}/deployer_test.go | 108 +++++++++++++----- .../fixtures/deploy-node/.dockerignore | 0 .../fixtures/deploy-node/Dockerfile | 0 .../fixtures/deploy-node/fly.toml | 0 .../fixtures/deploy-node/index.js | 0 .../fixtures/deploy-node/package-lock.json | 0 .../fixtures/deploy-node/package.json | 0 .../fixtures/deploy-node/somefile | 0 .../fixtures/launch-laravel/.gitignore | 0 .../fixtures/launch-laravel/Dockerfile | 0 test/preflight/apps_v2_integration_test.go | 2 +- test/preflight/fly_console_test.go | 2 +- test/preflight/fly_deploy_test.go | 21 +--- test/preflight/fly_launch_test.go | 4 +- test/preflight/fly_logs_test.go | 12 +- test/preflight/fly_machine_test.go | 2 +- test/preflight/fly_postgres_test.go | 2 +- test/preflight/fly_scale_test.go | 2 +- test/preflight/fly_tokens_test.go | 5 +- test/preflight/fly_volume_test.go | 2 +- test/{preflight => }/testlib/helpers.go | 12 +- test/{preflight => }/testlib/result.go | 0 test/{preflight => }/testlib/test_env.go | 0 24 files changed, 115 insertions(+), 61 deletions(-) rename test/{preflight => deployer}/deployer_test.go (68%) rename test/{preflight => }/fixtures/deploy-node/.dockerignore (100%) rename test/{preflight => }/fixtures/deploy-node/Dockerfile (100%) rename test/{preflight => }/fixtures/deploy-node/fly.toml (100%) rename test/{preflight => }/fixtures/deploy-node/index.js (100%) rename test/{preflight => }/fixtures/deploy-node/package-lock.json (100%) rename test/{preflight => }/fixtures/deploy-node/package.json (100%) rename test/{preflight => }/fixtures/deploy-node/somefile (100%) rename test/{preflight => }/fixtures/launch-laravel/.gitignore (100%) rename test/{preflight => }/fixtures/launch-laravel/Dockerfile (100%) rename test/{preflight => }/testlib/helpers.go (95%) rename test/{preflight => }/testlib/result.go (100%) rename test/{preflight => }/testlib/test_env.go (100%) diff --git a/deploy.rb b/deploy.rb index a7d35aa10e..bde774f3dd 100755 --- a/deploy.rb +++ b/deploy.rb @@ -54,7 +54,7 @@ steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) if DEPLOY_CUSTOMIZE else # only deploying, so we need to send the artifacts right away - steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO + steps.push({id: Step::BUILD, description: "Build image"}) steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW artifact Artifact::META, { steps: steps } end diff --git a/test/preflight/deployer_test.go b/test/deployer/deployer_test.go similarity index 68% rename from test/preflight/deployer_test.go rename to test/deployer/deployer_test.go index 74d0b4c53a..9ea8d3a0c0 100644 --- a/test/preflight/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -1,16 +1,17 @@ //go:build integration // +build integration -package preflight +package deployer import ( "context" "encoding/binary" + "encoding/json" "errors" "fmt" "io" - "io/ioutil" "os" + "strings" "testing" "github.com/docker/docker/api/types/container" @@ -19,7 +20,7 @@ import ( "github.com/docker/docker/client" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/require" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func 
TestDeployerDockerfile(t *testing.T) { @@ -31,7 +32,7 @@ func TestDeployerDockerfile(t *testing.T) { f := testlib.NewTestEnvFromEnv(t) - err = copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err = testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) @@ -48,10 +49,6 @@ func TestDeployerDockerfile(t *testing.T) { }) require.NoError(t, err) - flytoml, err := ioutil.ReadFile(flyTomlPath) - require.NoError(t, err) - fmt.Printf("FLY TOML:\n%s\n", string(flytoml)) - // app required f.Fly("apps create %s -o %s", appName, f.OrgSlug()) @@ -131,9 +128,11 @@ func TestDeployerDockerfile(t *testing.T) { logCh := make(chan *log) go func() { + var err error hdr := make([]byte, 8) for { - _, err := logs.Read(hdr) + // var n int + _, err = logs.Read(hdr) // fmt.Printf("read %d bytes of logs\n", n) if err != nil { if errors.Is(err, io.EOF) { @@ -152,52 +151,107 @@ func TestDeployerDockerfile(t *testing.T) { } }() - logDone := false + msgDone := false exited := false var exitCode int64 - var exitError error + + dep := DeployerOut{Artifacts: map[string]json.RawMessage{}} for { - if exited && logDone { - fmt.Printf("container done, code: %d, error: %+v\n", exitCode, exitError) + if err != nil || (exited && msgDone) { + fmt.Printf("container done, code: %d, error: %+v\n", exitCode, err) break } select { case l := <-logCh: - logDone = l == nil - if !logDone { - // var w io.Writer - // switch l.stream { - // case 1: - // w = os.Stdout - // default: - // w = os.Stderr - // } - - fmt.Printf(string(l.data)) + msgDone = l == nil + if !msgDone { + var msg Message + + fmt.Print(string(l.data)) + + if len(l.data) > 0 { + err = json.Unmarshal(l.data, &msg) + if err == nil { + if msg.Step != "" { + found := false + for _, s := range dep.Steps { + if s == msg.Step { + found = true + break + } + } + if !found { + dep.Steps = append(dep.Steps, msg.Step) + } + } + + if artifactName := strings.TrimPrefix(msg.Type, "artifact:"); artifactName != msg.Type { + dep.Artifacts[artifactName] = msg.Payload + } + + dep.Messages = append(dep.Messages, msg) + } + } } case w := <-waitCh: exited = true exitCode = w.StatusCode if w.Error != nil { - exitError = errors.New(w.Error.Message) + err = errors.New(w.Error.Message) } case we := <-waitErrCh: exited = true - exitError = we + err = we } } - require.Nil(t, exitError) + require.Nil(t, err) require.Zero(t, exitCode) body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) require.NoError(t, err) require.Contains(t, string(body), fmt.Sprintf("Hello, World! 
%s", f.ID())) + + var meta ArtifactMeta + err = json.Unmarshal(dep.Artifacts["meta"], &meta) + require.NoError(t, err) + + stepNames := make([]string, len(meta.Steps)+1) + stepNames[0] = "__root__" + for i, step := range meta.Steps { + stepNames[i+1] = step.ID + } + + require.Equal(t, dep.Steps, stepNames) } type log struct { stream uint8 data []byte } + +type Message struct { + ID int `json:"id"` + Step string `json:"step"` + Type string `json:"type"` + Time float64 `json:"time"` + + Payload json.RawMessage `json:"payload"` +} + +type DeployerOut struct { + Messages []Message + Steps []string + Artifacts map[string]json.RawMessage +} + +type Step struct { + ID string `json:"id"` + Description string `json:"description"` +} + +type ArtifactMeta struct { + Steps []Step `json:"steps"` +} diff --git a/test/preflight/fixtures/deploy-node/.dockerignore b/test/fixtures/deploy-node/.dockerignore similarity index 100% rename from test/preflight/fixtures/deploy-node/.dockerignore rename to test/fixtures/deploy-node/.dockerignore diff --git a/test/preflight/fixtures/deploy-node/Dockerfile b/test/fixtures/deploy-node/Dockerfile similarity index 100% rename from test/preflight/fixtures/deploy-node/Dockerfile rename to test/fixtures/deploy-node/Dockerfile diff --git a/test/preflight/fixtures/deploy-node/fly.toml b/test/fixtures/deploy-node/fly.toml similarity index 100% rename from test/preflight/fixtures/deploy-node/fly.toml rename to test/fixtures/deploy-node/fly.toml diff --git a/test/preflight/fixtures/deploy-node/index.js b/test/fixtures/deploy-node/index.js similarity index 100% rename from test/preflight/fixtures/deploy-node/index.js rename to test/fixtures/deploy-node/index.js diff --git a/test/preflight/fixtures/deploy-node/package-lock.json b/test/fixtures/deploy-node/package-lock.json similarity index 100% rename from test/preflight/fixtures/deploy-node/package-lock.json rename to test/fixtures/deploy-node/package-lock.json diff --git a/test/preflight/fixtures/deploy-node/package.json b/test/fixtures/deploy-node/package.json similarity index 100% rename from test/preflight/fixtures/deploy-node/package.json rename to test/fixtures/deploy-node/package.json diff --git a/test/preflight/fixtures/deploy-node/somefile b/test/fixtures/deploy-node/somefile similarity index 100% rename from test/preflight/fixtures/deploy-node/somefile rename to test/fixtures/deploy-node/somefile diff --git a/test/preflight/fixtures/launch-laravel/.gitignore b/test/fixtures/launch-laravel/.gitignore similarity index 100% rename from test/preflight/fixtures/launch-laravel/.gitignore rename to test/fixtures/launch-laravel/.gitignore diff --git a/test/preflight/fixtures/launch-laravel/Dockerfile b/test/fixtures/launch-laravel/Dockerfile similarity index 100% rename from test/preflight/fixtures/launch-laravel/Dockerfile rename to test/fixtures/launch-laravel/Dockerfile diff --git a/test/preflight/apps_v2_integration_test.go b/test/preflight/apps_v2_integration_test.go index 3f76239fb9..132d8b8221 100644 --- a/test/preflight/apps_v2_integration_test.go +++ b/test/preflight/apps_v2_integration_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func TestAppsV2Example(t *testing.T) { diff --git a/test/preflight/fly_console_test.go b/test/preflight/fly_console_test.go index ff61612dea..8203ceb272 100644 --- 
a/test/preflight/fly_console_test.go +++ b/test/preflight/fly_console_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func TestFlyConsole(t *testing.T) { diff --git a/test/preflight/fly_deploy_test.go b/test/preflight/fly_deploy_test.go index 5584314a99..65079ff489 100644 --- a/test/preflight/fly_deploy_test.go +++ b/test/preflight/fly_deploy_test.go @@ -6,7 +6,6 @@ package preflight import ( "fmt" "path/filepath" - "runtime" "strings" "testing" "time" @@ -17,7 +16,7 @@ import ( //fly "github.com/superfly/fly-go" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func TestFlyDeployHA(t *testing.T) { @@ -123,19 +122,9 @@ func TestFlyDeploySlowMetrics(t *testing.T) { f.Fly("deploy") } -func getRootPath() string { - _, b, _, _ := runtime.Caller(0) - return filepath.Dir(b) -} - -func copyFixtureIntoWorkDir(workDir, name string, exclusion []string) error { - src := fmt.Sprintf("%s/fixtures/%s", getRootPath(), name) - return testlib.CopyDir(src, workDir, exclusion) -} - func TestFlyDeployNodeAppWithRemoteBuilder(t *testing.T) { f := testlib.NewTestEnvFromEnv(t) - err := copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err := testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) @@ -171,7 +160,7 @@ func TestFlyDeployNodeAppWithRemoteBuilderWithoutWireguard(t *testing.T) { t.Skip() } - err := copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err := testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) @@ -198,7 +187,7 @@ func TestFlyDeployNodeAppWithRemoteBuilderWithoutWireguard(t *testing.T) { func TestFlyDeployNodeAppWithDepotRemoteBuilder(t *testing.T) { f := testlib.NewTestEnvFromEnv(t) - err := copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err := testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) @@ -233,7 +222,7 @@ func TestFlyDeployBasicNodeWithWGEnabled(t *testing.T) { t.Skip() } - err := copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err := testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) diff --git a/test/preflight/fly_launch_test.go b/test/preflight/fly_launch_test.go index 1ed3d50411..a4d2f7bbb3 100644 --- a/test/preflight/fly_launch_test.go +++ b/test/preflight/fly_launch_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" fly "github.com/superfly/fly-go" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) // TODO: list of things to test @@ -298,7 +298,7 @@ RUN --mount=type=secret,id=secret1 cat /run/secrets/secret1 > /tmp/secrets.txt func TestFlyLaunchBasicNodeApp(t *testing.T) { f := testlib.NewTestEnvFromEnv(t) - err := copyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) + err := testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node", []string{}) require.NoError(t, err) flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) diff --git a/test/preflight/fly_logs_test.go b/test/preflight/fly_logs_test.go index 
ebbb5b260d..5724ac401a 100644 --- a/test/preflight/fly_logs_test.go +++ b/test/preflight/fly_logs_test.go @@ -4,11 +4,11 @@ package preflight import ( - "github.com/stretchr/testify/require" - "github.com/superfly/flyctl/test/preflight/testlib" "testing" -) + "github.com/stretchr/testify/require" + "github.com/superfly/flyctl/test/testlib" +) func TestFlyLogsMachineFlagBehavior(t *testing.T) { // Test `flyctl logs` with different flag combinations @@ -28,16 +28,16 @@ func TestFlyLogsMachineFlagBehavior(t *testing.T) { // Test if --machine works, should not throw an error t.Run("TestRunsWhenMachineFlagProvided", func(tt *testing.T) { - f.Fly("logs --app "+appName+" --no-tail --machine " + machineId) + f.Fly("logs --app " + appName + " --no-tail --machine " + machineId) }) // Test if --instance works, should not throw an error t.Run("TestRunsWhenInstanceFlagProvided", func(tt *testing.T) { - f.Fly("logs --app "+appName+" --no-tail --instance " + machineId) + f.Fly("logs --app " + appName + " --no-tail --instance " + machineId) }) // Test if alias shorthand -i works, should not throw an error t.Run("TestRunsWhenInstanceShorthandProvided", func(tt *testing.T) { - f.Fly("logs --app "+appName+" --no-tail -i " + machineId) + f.Fly("logs --app " + appName + " --no-tail -i " + machineId) }) } diff --git a/test/preflight/fly_machine_test.go b/test/preflight/fly_machine_test.go index 86d7732099..011d78ca61 100644 --- a/test/preflight/fly_machine_test.go +++ b/test/preflight/fly_machine_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" fly "github.com/superfly/fly-go" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) // test --port and --autostart --autostop flags diff --git a/test/preflight/fly_postgres_test.go b/test/preflight/fly_postgres_test.go index 154166bd36..f140f5761f 100644 --- a/test/preflight/fly_postgres_test.go +++ b/test/preflight/fly_postgres_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" fly "github.com/superfly/fly-go" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func TestPostgres_singleNode(t *testing.T) { diff --git a/test/preflight/fly_scale_test.go b/test/preflight/fly_scale_test.go index cc91ae9a07..edcfca97a8 100644 --- a/test/preflight/fly_scale_test.go +++ b/test/preflight/fly_scale_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/superfly/flyctl/test/preflight/testlib" + "github.com/superfly/flyctl/test/testlib" ) func TestFlyScaleCount(t *testing.T) { diff --git a/test/preflight/fly_tokens_test.go b/test/preflight/fly_tokens_test.go index 515759473d..db914c1b81 100644 --- a/test/preflight/fly_tokens_test.go +++ b/test/preflight/fly_tokens_test.go @@ -4,10 +4,11 @@ package preflight import ( - "github.com/stretchr/testify/require" - "github.com/superfly/flyctl/test/preflight/testlib" "os" "testing" + + "github.com/stretchr/testify/require" + "github.com/superfly/flyctl/test/testlib" ) // TODO: list of things to test diff --git a/test/preflight/fly_volume_test.go b/test/preflight/fly_volume_test.go index 3dbfdef076..e7b6f4a060 100644 --- a/test/preflight/fly_volume_test.go +++ b/test/preflight/fly_volume_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/superfly/fly-go" - "github.com/superfly/flyctl/test/preflight/testlib" + 
"github.com/superfly/flyctl/test/testlib" ) func TestFlyVolumeExtend(t *testing.T) { diff --git a/test/preflight/testlib/helpers.go b/test/testlib/helpers.go similarity index 95% rename from test/preflight/testlib/helpers.go rename to test/testlib/helpers.go index 05f8697bfb..b9d4cd5d8b 100644 --- a/test/preflight/testlib/helpers.go +++ b/test/testlib/helpers.go @@ -54,7 +54,7 @@ func otherRegionsFromEnv() []string { func currentRepoFlyctl() string { _, filename, _, _ := runtime.Caller(0) - flyctlBin := path.Join(path.Dir(filename), "../../..", "bin", "flyctl") + flyctlBin := path.Join(path.Dir(filename), "../..", "bin", "flyctl") return flyctlBin } @@ -328,3 +328,13 @@ func OverwriteConfig(path string, data map[string]any) error { return nil } + +func getRootPath() string { + _, b, _, _ := runtime.Caller(0) + return filepath.Dir(b) +} + +func CopyFixtureIntoWorkDir(workDir, name string, exclusion []string) error { + src := fmt.Sprintf("%s/../fixtures/%s", getRootPath(), name) + return CopyDir(src, workDir, exclusion) +} diff --git a/test/preflight/testlib/result.go b/test/testlib/result.go similarity index 100% rename from test/preflight/testlib/result.go rename to test/testlib/result.go diff --git a/test/preflight/testlib/test_env.go b/test/testlib/test_env.go similarity index 100% rename from test/preflight/testlib/test_env.go rename to test/testlib/test_env.go From f93cdba7178a74add88f8ecc8077068e2fe1852a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 17 Sep 2024 07:51:18 -0400 Subject: [PATCH 042/104] ignore .fly --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index e300a51e73..9896192d99 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,5 @@ out # generated release meta release.json + +.fly \ No newline at end of file From 50f092820909c3c0dd305d28db104f13c6e9b3f2 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 17 Sep 2024 08:27:00 -0400 Subject: [PATCH 043/104] always generate git diff --- deploy.rb | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/deploy.rb b/deploy.rb index bde774f3dd..e5e7fb2695 100755 --- a/deploy.rb +++ b/deploy.rb @@ -128,7 +128,6 @@ RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) - DO_GEN_REQS = !RUNTIME_LANGUAGE.empty? 
steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies", async: true}) if DO_INSTALL_DEPS @@ -232,7 +231,7 @@ TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") SENTRY = manifest.dig("plan", "sentry") == true - steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS + steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE @@ -247,13 +246,11 @@ # Join the parallel task thread deps_thread.join - if DO_GEN_REQS - in_step Step::GENERATE_BUILD_REQUIREMENTS do - exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") - exec_capture("git add -A", log: false) - diff = exec_capture("git diff --cached", log: false) - artifact Artifact::DIFF, { output: diff } - end + in_step Step::GENERATE_BUILD_REQUIREMENTS do + exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") + exec_capture("git add -A", log: false) + diff = exec_capture("git diff --cached", log: false) + artifact Artifact::DIFF, { output: diff } end end From 45210ba9729e0d94ccaf1df79b42d1051d4e2292 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 17 Sep 2024 10:25:18 -0400 Subject: [PATCH 044/104] abstracted the deployer run to be reusable --- test/deployer/deployer_test.go | 230 ++------------------ test/testlib/deployer.go | 377 +++++++++++++++++++++++++++++++++ 2 files changed, 399 insertions(+), 208 deletions(-) create mode 100644 test/testlib/deployer.go diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 41c83527f7..dd31a055ab 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -5,253 +5,67 @@ package deployer import ( "context" - "encoding/binary" - "encoding/json" - "errors" "fmt" - "io" - "os" - "strings" "testing" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" - v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/require" "github.com/superfly/flyctl/test/testlib" ) -func TestDeployerDockerfile(t *testing.T) { - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - panic(err) - } - defer dockerClient.Close() +func TestDeployBasicNode(t *testing.T) { + ctx := context.TODO() + d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) + require.NoError(t, err) - f := testlib.NewTestEnvFromEnv(t) + defer d.Close() - err = testlib.CopyFixtureIntoWorkDir(f.WorkDir(), "deploy-node") + err = testlib.CopyFixtureIntoWorkDir(d.WorkDir(), "deploy-node") require.NoError(t, err) - flyTomlPath := fmt.Sprintf("%s/fly.toml", f.WorkDir()) + flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) - appName := f.CreateRandomAppName() + appName := d.CreateRandomAppName() require.NotEmpty(t, appName) err = testlib.OverwriteConfig(flyTomlPath, map[string]any{ "app": appName, - "region": f.PrimaryRegion(), + "region": d.PrimaryRegion(), "env": map[string]string{ - "TEST_ID": f.ID(), + "TEST_ID": d.ID(), }, }) require.NoError(t, err) // app required - f.Fly("apps create %s -o %s", appName, f.OrgSlug()) - - ctx := 
context.TODO() - - imageRef := os.Getenv("FLY_DEPLOYER_IMAGE") - require.NotEmpty(t, imageRef) - - if os.Getenv("FLY_DEPLOYER_IMAGE_NO_PULL") == "" { - fmt.Println("pulling image...") - out, err := dockerClient.ImagePull(ctx, imageRef, image.PullOptions{Platform: "linux/amd64"}) - if err != nil { - panic(err) - } - - defer out.Close() - - _, err = io.Copy(os.Stdout, out) - if err != nil { - // TODO: fatal? - fmt.Printf("error copying image pull io: %v\n", err) - } - } - - fmt.Printf("creating container... binding /usr/src/app to %s\n", f.WorkDir()) - cont, err := dockerClient.ContainerCreate(ctx, &container.Config{ - Image: imageRef, - Env: []string{ - fmt.Sprintf("FLY_API_TOKEN=%s", f.AccessToken()), - fmt.Sprintf("DEPLOY_ORG_SLUG=%s", f.OrgSlug()), - "DEPLOY_ONLY=1", - "DEPLOY_NOW=1", - }, - Tty: false, - }, &container.HostConfig{ - RestartPolicy: container.RestartPolicy{ - Name: container.RestartPolicyDisabled, - }, - Binds: []string{fmt.Sprintf("%s:/usr/src/app", f.WorkDir())}, - NetworkMode: network.NetworkHost, - }, nil, &v1.Platform{ - Architecture: "amd64", - OS: "linux", - }, fmt.Sprintf("deployer-%s", appName)) - - if err != nil { - panic(err) - } - - fmt.Printf("Container %s is created\n", cont.ID) - - defer dockerClient.ContainerRemove(ctx, cont.ID, container.RemoveOptions{ - RemoveVolumes: true, - RemoveLinks: true, - Force: true, - }) - - fmt.Println("starting container...") - err = dockerClient.ContainerStart(ctx, cont.ID, container.StartOptions{}) - if err != nil { - panic(err) - } - - logs, err := dockerClient.ContainerLogs(context.Background(), cont.ID, container.LogsOptions{ - ShowStderr: true, - ShowStdout: true, - Follow: true, - }) - if err != nil { - panic(err) - } - - defer logs.Close() + d.Fly("apps create %s -o %s", appName, d.OrgSlug()) - waitCh, waitErrCh := dockerClient.ContainerWait(ctx, cont.ID, container.WaitConditionNotRunning) + deploy := d.NewRun(testlib.DeployOnly, testlib.DeployNow, testlib.WithAppSource(d.WorkDir())) - logCh := make(chan *log) + defer deploy.Close() - go func() { - var err error - hdr := make([]byte, 8) - for { - // var n int - _, err = logs.Read(hdr) - // fmt.Printf("read %d bytes of logs\n", n) - if err != nil { - if errors.Is(err, io.EOF) { - // fmt.Println("EOF!") - logCh <- nil - break - } - panic(err) - } + err = deploy.Start(ctx) - count := binary.BigEndian.Uint32(hdr[4:]) - dat := make([]byte, count) - _, err = logs.Read(dat) - - logCh <- &log{stream: hdr[0], data: dat} - } - }() - - msgDone := false - exited := false - var exitCode int64 - - dep := DeployerOut{Artifacts: map[string]json.RawMessage{}} - - for { - if err != nil || (exited && msgDone) { - fmt.Printf("container done, code: %d, error: %+v\n", exitCode, err) - break - } - select { - case l := <-logCh: - msgDone = l == nil - if !msgDone { - var msg Message - - fmt.Print(string(l.data)) - - if len(l.data) > 0 { - err = json.Unmarshal(l.data, &msg) - if err == nil { - if msg.Step != "" { - found := false - for _, s := range dep.Steps { - if s == msg.Step { - found = true - break - } - } - if !found { - dep.Steps = append(dep.Steps, msg.Step) - } - } - - if artifactName := strings.TrimPrefix(msg.Type, "artifact:"); artifactName != msg.Type { - dep.Artifacts[artifactName] = msg.Payload - } - - dep.Messages = append(dep.Messages, msg) - } - } - } - case w := <-waitCh: - exited = true - exitCode = w.StatusCode - if w.Error != nil { - err = errors.New(w.Error.Message) - } - case we := <-waitErrCh: - exited = true - err = we - } - } + require.Nil(t, err) + out, err := 
deploy.Wait() require.Nil(t, err) - require.Zero(t, exitCode) + + require.Zero(t, deploy.ExitCode()) body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) require.NoError(t, err) - require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", f.ID())) + require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", d.ID())) - var meta ArtifactMeta - err = json.Unmarshal(dep.Artifacts["meta"], &meta) + meta, err := out.ArtifactMeta() require.NoError(t, err) - stepNames := make([]string, len(meta.Steps)+1) - stepNames[0] = "__root__" - for i, step := range meta.Steps { - stepNames[i+1] = step.ID - } + stepNames := append([]string{"__root__"}, meta.StepNames()...) - require.Equal(t, dep.Steps, stepNames) + require.Equal(t, out.Steps, stepNames) } type log struct { stream uint8 data []byte } - -type Message struct { - ID int `json:"id"` - Step string `json:"step"` - Type string `json:"type"` - Time float64 `json:"time"` - - Payload json.RawMessage `json:"payload"` -} - -type DeployerOut struct { - Messages []Message - Steps []string - Artifacts map[string]json.RawMessage -} - -type Step struct { - ID string `json:"id"` - Description string `json:"description"` -} - -type ArtifactMeta struct { - Steps []Step `json:"steps"` -} diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go new file mode 100644 index 0000000000..d0a6bfb3db --- /dev/null +++ b/test/testlib/deployer.go @@ -0,0 +1,377 @@ +//go:build integration +// +build integration + +package testlib + +import ( + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/stretchr/testify/require" +) + +type DeployerTestEnv struct { + *FlyctlTestEnv + t testing.TB + dockerClient *client.Client + image string + noPull bool +} + +func NewDeployerTestEnvFromEnv(ctx context.Context, t testing.TB) (*DeployerTestEnv, error) { + imageRef := os.Getenv("FLY_DEPLOYER_IMAGE") + require.NotEmpty(t, imageRef) + noPull := os.Getenv("FLY_DEPLOYER_IMAGE_NO_PULL") != "" + + dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return nil, err + } + + dockerClient.NegotiateAPIVersion(ctx) + + fmt.Printf("docker API version: %s\n", dockerClient.ClientVersion()) + + if !noPull { + fmt.Println("pulling image...") + out, err := dockerClient.ImagePull(ctx, imageRef, image.PullOptions{Platform: "linux/amd64"}) + if err != nil { + return nil, err + } + defer out.Close() + + _, err = io.Copy(os.Stdout, out) + if err != nil { + return nil, err + } + } + + return &DeployerTestEnv{FlyctlTestEnv: NewTestEnvFromEnv(t), t: t, dockerClient: dockerClient, image: imageRef, noPull: noPull}, nil +} + +func (d *DeployerTestEnv) Close() error { + return d.dockerClient.Close() +} + +func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun { + run := &DeployTestRun{dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}} + for _, o := range options { + o(run) + } + return run +} + +type DeployTestRun struct { + dockerClient *client.Client + deployerImage string + + // required! 
+ apiToken string + orgSlug string + + appName string + gitRepo string + gitRef string + + deployOnly bool + deployNow bool + + containerBinds []string + + containerID string + + waitCh chan *DeployerOut + waitErrCh chan error + + exitCode int64 + + done bool + out *DeployerOut + err error +} + +func WithApp(app string) func(*DeployTestRun) { + return func(d *DeployTestRun) { + d.appName = app + } +} + +func WithGitRepo(repo string) func(*DeployTestRun) { + return func(d *DeployTestRun) { + d.gitRepo = repo + } +} + +func WithGitRef(ref string) func(*DeployTestRun) { + return func(d *DeployTestRun) { + d.gitRef = ref + } +} + +func DeployOnly(d *DeployTestRun) { + d.deployOnly = true + +} + +func DeployNow(d *DeployTestRun) { + d.deployNow = true +} + +func WithAppSource(src string) func(*DeployTestRun) { + return func(d *DeployTestRun) { + d.containerBinds = append(d.containerBinds, fmt.Sprintf("%s:/usr/src/app", src)) + } +} + +func (d *DeployTestRun) Start(ctx context.Context) error { + env := []string{ + fmt.Sprintf("FLY_API_TOKEN=%s", d.apiToken), + fmt.Sprintf("DEPLOY_ORG_SLUG=%s", d.orgSlug), + } + + if d.appName != "" { + env = append(env, fmt.Sprintf("DEPLOY_APP_NAME=%s", d.appName)) + } + if d.gitRepo != "" { + env = append(env, fmt.Sprintf("GIT_REPO=%s", d.gitRepo)) + } + if d.gitRef != "" { + env = append(env, fmt.Sprintf("GIT_REF=%s", d.gitRef)) + } + + if d.deployOnly { + env = append(env, "DEPLOY_ONLY=1") + } + if d.deployNow { + env = append(env, "DEPLOY_NOW=1") + } + + fmt.Printf("creating container... image=%s\n", d.deployerImage) + cont, err := d.dockerClient.ContainerCreate(ctx, &container.Config{ + Image: d.deployerImage, + Env: env, + Tty: false, + }, &container.HostConfig{ + RestartPolicy: container.RestartPolicy{ + Name: container.RestartPolicyDisabled, + }, + Binds: d.containerBinds, + NetworkMode: network.NetworkHost, + }, nil, &v1.Platform{ + Architecture: "amd64", + OS: "linux", + }, "") + + if err != nil { + return err + } + + d.containerID = cont.ID + + fmt.Println("starting container...") + err = d.dockerClient.ContainerStart(ctx, cont.ID, container.StartOptions{}) + if err != nil { + fmt.Printf("could not start container: %+v\n", err) + return err + } + + d.waitCh = make(chan *DeployerOut, 1) + d.waitErrCh = make(chan error, 1) + + go func() { + defer d.Close() + + logs, err := d.dockerClient.ContainerLogs(context.Background(), cont.ID, container.LogsOptions{ + ShowStderr: true, + ShowStdout: true, + Follow: true, + }) + if err != nil { + panic(err) + } + + defer logs.Close() + + waitCh, waitErrCh := d.dockerClient.ContainerWait(ctx, cont.ID, container.WaitConditionNotRunning) + + logCh := make(chan *log) + + go func() { + var err error + hdr := make([]byte, 8) + for { + // var n int + _, err = logs.Read(hdr) + // fmt.Printf("read %d bytes of logs\n", n) + if err != nil { + if errors.Is(err, io.EOF) { + // fmt.Println("EOF!") + logCh <- nil + break + } + d.err = err + d.waitErrCh <- err + d.done = true + } + + count := binary.BigEndian.Uint32(hdr[4:]) + dat := make([]byte, count) + _, err = logs.Read(dat) + + logCh <- &log{stream: hdr[0], data: dat} + } + }() + + msgDone := false + exited := false + + d.out = &DeployerOut{Artifacts: map[string]json.RawMessage{}} + + for { + if d.done { + break + } + if err != nil || (exited && msgDone) { + fmt.Printf("container done, code: %d, error: %+v\n", d.exitCode, err) + if err != nil { + d.err = err + d.waitErrCh <- err + } else { + d.waitCh <- d.out + } + d.done = true + break + } + select { + case l := <-logCh: + 
msgDone = l == nil + if !msgDone { + var msg Message + + fmt.Print(string(l.data)) + + if len(l.data) > 0 { + err = json.Unmarshal(l.data, &msg) + if err == nil { + if msg.Step != "" { + found := false + for _, s := range d.out.Steps { + if s == msg.Step { + found = true + break + } + } + if !found { + d.out.Steps = append(d.out.Steps, msg.Step) + } + } + + if artifactName := strings.TrimPrefix(msg.Type, "artifact:"); artifactName != msg.Type { + d.out.Artifacts[artifactName] = msg.Payload + } + + d.out.Messages = append(d.out.Messages, msg) + } + } + } + case w := <-waitCh: + exited = true + d.exitCode = w.StatusCode + if w.Error != nil { + err = errors.New(w.Error.Message) + } + case we := <-waitErrCh: + exited = true + err = we + } + } + }() + + return nil +} + +func (d *DeployTestRun) Wait() (*DeployerOut, error) { + if d.done { + if d.err != nil { + return nil, d.err + } + return d.out, nil + } + select { + case out := <-d.waitCh: + return out, nil + case err := <-d.waitErrCh: + return nil, err + } +} + +func (d *DeployTestRun) ExitCode() int64 { + return d.exitCode +} + +func (d *DeployTestRun) Close() error { + return d.dockerClient.ContainerRemove(context.TODO(), d.containerID, container.RemoveOptions{ + RemoveVolumes: true, + RemoveLinks: true, + Force: true, + }) +} + +type log struct { + stream uint8 + data []byte +} + +type Message struct { + ID int `json:"id"` + Step string `json:"step"` + Type string `json:"type"` + Time float64 `json:"time"` + + Payload json.RawMessage `json:"payload"` +} + +type Step struct { + ID string `json:"id"` + Description string `json:"description"` +} + +type ArtifactMeta struct { + Steps []Step `json:"steps"` +} + +func (m *ArtifactMeta) StepNames() []string { + stepNames := make([]string, len(m.Steps)) + for i, step := range m.Steps { + stepNames[i] = step.ID + } + return stepNames +} + +type DeployerOut struct { + Messages []Message + Steps []string + Artifacts map[string]json.RawMessage +} + +func (out *DeployerOut) ArtifactMeta() (*ArtifactMeta, error) { + var meta ArtifactMeta + err := json.Unmarshal(out.Artifacts["meta"], &meta) + if err != nil { + return nil, err + } + return &meta, nil +} From 8bfb0d23bc8ae5367ba9e78d2ee3e2e1823dfac8 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 17 Sep 2024 10:25:31 -0400 Subject: [PATCH 045/104] run deployer tests during preflight --- scripts/preflight.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/preflight.sh b/scripts/preflight.sh index abdbaf69b6..3e94fae4fc 100755 --- a/scripts/preflight.sh +++ b/scripts/preflight.sh @@ -46,7 +46,7 @@ set +e gotesplit \ -total "$total" \ -index "$index" \ - github.com/superfly/flyctl/test/preflight/... \ + github.com/superfly/flyctl/test/preflight/... github.com/superfly/flyctl/test/deployer/... \ -- --tags=integration -v -timeout=10m $test_opts | tee "$test_log" test_status=$? 
From 39171ae9d7209580896023731b5b586101d6732d Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 17 Sep 2024 15:15:19 -0400 Subject: [PATCH 046/104] add launch test --- deploy.rb | 28 +++++++----- internal/command/launch/plan_commands.go | 2 +- test/deployer/deployer_test.go | 54 ++++++++++++++++++++++-- test/testlib/deployer.go | 27 +++++++++++- 4 files changed, 96 insertions(+), 15 deletions(-) diff --git a/deploy.rb b/deploy.rb index e5e7fb2695..f74590641d 100755 --- a/deploy.rb +++ b/deploy.rb @@ -25,6 +25,8 @@ DEPLOY_APP_REGION = get_env("DEPLOY_APP_REGION") +DEPLOY_COPY_CONFIG = get_env("DEPLOY_COPY_CONFIG") + GIT_REPO = get_env("GIT_REPO") GIT_REPO_URL = if GIT_REPO @@ -104,7 +106,7 @@ cmd += " --region #{region}" end - cmd += " --copy-config" if get_env("DEPLOY_COPY_CONFIG") + cmd += " --copy-config" if DEPLOY_COPY_CONFIG exec_capture(cmd).chomp @@ -225,14 +227,16 @@ ORG_SLUG = manifest["plan"]["org"] APP_REGION = manifest["plan"]["region"] + DO_GEN_REQS = !DEPLOY_COPY_CONFIG + FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") UPSTASH = manifest.dig("plan", "redis", "upstash_redis") TIGRIS = manifest.dig("plan", "object_storage", "tigris_object_storage") SENTRY = manifest.dig("plan", "sentry") == true - steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) - steps.push({id: Step::BUILD, description: "Build image"}) if GIT_REPO + steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS + steps.push({id: Step::BUILD, description: "Build image"}) steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH @@ -246,11 +250,15 @@ # Join the parallel task thread deps_thread.join - in_step Step::GENERATE_BUILD_REQUIREMENTS do - exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") - exec_capture("git add -A", log: false) - diff = exec_capture("git diff --cached", log: false) - artifact Artifact::DIFF, { output: diff } + if DO_GEN_REQS + in_step Step::GENERATE_BUILD_REQUIREMENTS do + exec_capture("flyctl launch plan generate #{MANIFEST_PATH}") + if GIT_REPO + exec_capture("git add -A", log: false) + diff = exec_capture("git diff --cached", log: false) + artifact Artifact::DIFF, { output: diff } + end + end end end @@ -266,7 +274,7 @@ else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} #{conf_arg} --image-label #{image_tag}") + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag}") artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end @@ -369,7 +377,7 @@ if DEPLOY_NOW in_step Step::DEPLOY do - exec_capture("flyctl deploy -a #{APP_NAME} #{conf_arg} --image #{image_ref}") + exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref}") end end diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index a241e69b9c..6c6a33a4b0 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -30,7 +30,7 @@ func NewPlan() *cobra.Command { func newPropose() *cobra.Command { const desc = "[experimental] propose a plan based on scanning the source code or 
Dockerfile" - cmd := command.New("propose", desc, desc, runPropose) + cmd := command.New("propose", desc, desc, runPropose, command.LoadAppConfigIfPresent) flag.Add(cmd, flag.Region(), diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index dd31a055ab..4d4d254b75 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -65,7 +65,55 @@ func TestDeployBasicNode(t *testing.T) { require.Equal(t, out.Steps, stepNames) } -type log struct { - stream uint8 - data []byte +func TestLaunchBasicNode(t *testing.T) { + ctx := context.TODO() + d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) + require.NoError(t, err) + + defer d.Close() + + err = testlib.CopyFixtureIntoWorkDir(d.WorkDir(), "deploy-node") + require.NoError(t, err) + + flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) + + appName := d.CreateRandomAppName() + require.NotEmpty(t, appName) + + err = testlib.OverwriteConfig(flyTomlPath, map[string]any{ + "app": "dummy-app-name", + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + }) + require.NoError(t, err) + + // app required + d.Fly("apps create %s -o %s", appName, d.OrgSlug()) + + deploy := d.NewRun(testlib.WithApp(appName), testlib.WithCopyConfig, testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithAppSource(d.WorkDir())) + + defer deploy.Close() + + err = deploy.Start(ctx) + + require.Nil(t, err) + + out, err := deploy.Wait() + require.Nil(t, err) + + require.Zero(t, deploy.ExitCode()) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", d.ID())) + + meta, err := out.ArtifactMeta() + require.NoError(t, err) + + stepNames := append([]string{"__root__"}, meta.StepNames()...) + + require.Equal(t, out.Steps, stepNames) } diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index d0a6bfb3db..095fbb070e 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -86,6 +86,10 @@ type DeployTestRun struct { gitRepo string gitRef string + noCustomize bool + skipExtensions bool + copyConfig bool + deployOnly bool deployNow bool @@ -121,9 +125,20 @@ func WithGitRef(ref string) func(*DeployTestRun) { } } +func WithoutCustomize(d *DeployTestRun) { + d.noCustomize = true +} + +func WithouExtensions(d *DeployTestRun) { + d.skipExtensions = true +} + +func WithCopyConfig(d *DeployTestRun) { + d.copyConfig = true +} + func DeployOnly(d *DeployTestRun) { d.deployOnly = true - } func DeployNow(d *DeployTestRun) { @@ -152,6 +167,16 @@ func (d *DeployTestRun) Start(ctx context.Context) error { env = append(env, fmt.Sprintf("GIT_REF=%s", d.gitRef)) } + if d.noCustomize { + env = append(env, "NO_DEPLOY_CUSTOMIZE=1") + } + if d.skipExtensions { + env = append(env, "SKIP_EXTENSIONS=1") + } + if d.copyConfig { + env = append(env, "DEPLOY_COPY_CONFIG=1") + } + if d.deployOnly { env = append(env, "DEPLOY_ONLY=1") } From 0ad2e56a94893de47150aace5859a3c62c77a451 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 18 Sep 2024 09:12:27 -0400 Subject: [PATCH 047/104] disable depot --- deploy.rb | 6 ++++-- test/deployer/deployer_test.go | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/deploy.rb b/deploy.rb index f74590641d..b4a147c773 100755 --- a/deploy.rb +++ b/deploy.rb @@ -85,6 +85,8 @@ end end +HAS_FLY_CONFIG = Dir.entries(".").any? 
{ |f| File.fnmatch('fly.{toml,json,yaml,yml}', f, File::FNM_EXTGLOB)} + # -c arg if any conf_arg = "" @@ -227,7 +229,7 @@ ORG_SLUG = manifest["plan"]["org"] APP_REGION = manifest["plan"]["region"] - DO_GEN_REQS = !DEPLOY_COPY_CONFIG + DO_GEN_REQS = !DEPLOY_COPY_CONFIG || !HAS_FLY_CONFIG FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") @@ -274,7 +276,7 @@ else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag}") + exec_capture("flyctl deploy --build-only --depot=false --push -a #{APP_NAME} --image-label #{image_tag}") artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 4d4d254b75..e201734786 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -14,6 +14,7 @@ import ( func TestDeployBasicNode(t *testing.T) { ctx := context.TODO() + d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) require.NoError(t, err) From 6f6c089942fb980c74e2627be69bc166682f6b2f Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 18 Sep 2024 14:12:02 -0400 Subject: [PATCH 048/104] go mod tidy --- go.mod | 9 +++++++-- go.sum | 12 ++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index cdd4a6ac95..ea94f85f61 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/superfly/flyctl -go 1.22.0 +go 1.22.5 toolchain go1.22.6 @@ -58,6 +58,7 @@ require ( github.com/novln/docker-parser v1.0.0 github.com/oklog/ulid/v2 v2.1.0 github.com/olekukonko/tablewriter v0.0.5 + github.com/opencontainers/image-spec v1.1.0 github.com/pelletier/go-toml/v2 v2.2.3 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 @@ -153,6 +154,7 @@ require ( github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containerd/ttrpc v1.2.3 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -218,7 +220,6 @@ require ( github.com/nats-io/nkeys v0.4.7 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/selinux v1.11.0 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect @@ -227,6 +228,7 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 // indirect github.com/rivo/uniseg v0.4.3 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect @@ -235,6 +237,7 @@ require ( github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.2.2 // indirect + github.com/sosodev/duration v1.3.1 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -242,6 +245,7 @@ require ( github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c // indirect 
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect + github.com/urfave/cli/v2 v2.27.4 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -249,6 +253,7 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect diff --git a/go.sum b/go.sum index de535d3d0c..64d9e09617 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ connectrpc.com/connect v1.16.1 h1:rOdrK/RTI/7TVnn3JsVxt3n028MlTRwmK5Q4heSpjis= connectrpc.com/connect v1.16.1/go.mod h1:XpZAduBQUySsb4/KO5JffORVkDI4B6/EYPi7N8xpNZw= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/99designs/gqlgen v0.17.53 h1:FJOJaF96d7Y5EBpoaLG96fz1NR6B8bFdCZI1yZwYArM= +github.com/99designs/gqlgen v0.17.53/go.mod h1:77/+pVe6zlTsz++oUg2m8VLgzdUPHxjoAG3BxI5y8Rc= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= @@ -204,6 +206,7 @@ github.com/containerd/ttrpc v1.2.3 h1:4jlhbXIGvijRtNC8F/5CpuJZ7yKOBFGFOOXg1bkISz github.com/containerd/ttrpc v1.2.3/go.mod h1:ieWsXucbb8Mj9PH0rXCw1i8IunRbbAiDkpXkbfflWBM= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -564,6 +567,8 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= @@ -594,6 +599,8 @@ github.com/skratchdot/open-golang 
v0.0.0-20200116055534-eef842397966/go.mod h1:s github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4= +github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= @@ -648,6 +655,9 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 h1:Y/M5lygoNPKwVNLMPXgVfsRT40CSFKXCxuU8LoHySjs= github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= @@ -665,6 +675,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= From 7adf153230c38d62de5023c435859e9da70aff88 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 18 Sep 2024 14:12:09 -0400 Subject: [PATCH 049/104] add go-example test --- test/deployer/deployer_test.go | 44 ++++++++++++++++++++++++---------- test/testlib/deployer.go | 28 +++++++++++++++++++++- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index e201734786..24e490fb39 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -48,7 +48,7 @@ func TestDeployBasicNode(t *testing.T) { require.Nil(t, err) - out, err := deploy.Wait() + _, err = deploy.Wait() require.Nil(t, err) require.Zero(t, deploy.ExitCode()) @@ -57,17 +57,11 @@ func TestDeployBasicNode(t *testing.T) { require.NoError(t, err) require.Contains(t, string(body), fmt.Sprintf("Hello, World! 
%s", d.ID())) - - meta, err := out.ArtifactMeta() - require.NoError(t, err) - - stepNames := append([]string{"__root__"}, meta.StepNames()...) - - require.Equal(t, out.Steps, stepNames) } func TestLaunchBasicNode(t *testing.T) { ctx := context.TODO() + d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) require.NoError(t, err) @@ -101,7 +95,7 @@ func TestLaunchBasicNode(t *testing.T) { require.Nil(t, err) - out, err := deploy.Wait() + _, err = deploy.Wait() require.Nil(t, err) require.Zero(t, deploy.ExitCode()) @@ -110,11 +104,37 @@ func TestLaunchBasicNode(t *testing.T) { require.NoError(t, err) require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", d.ID())) +} + +func TestLaunchGoFromRepo(t *testing.T) { + ctx := context.TODO() - meta, err := out.ArtifactMeta() + d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) require.NoError(t, err) - stepNames := append([]string{"__root__"}, meta.StepNames()...) + defer d.Close() + + appName := d.CreateRandomAppName() + require.NotEmpty(t, appName) + + // app required + d.Fly("apps create %s -o %s", appName, d.OrgSlug()) + + deploy := d.NewRun(testlib.WithApp(appName), testlib.WithRegion("yyz"), testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithGitRepo("https://github.com/fly-apps/go-example")) + + defer deploy.Close() + + err = deploy.Start(ctx) + + require.Nil(t, err) + + _, err = deploy.Wait() + require.Nil(t, err) + + require.Zero(t, deploy.ExitCode()) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) - require.Equal(t, out.Steps, stepNames) + require.Contains(t, string(body), "I'm running in the yyz region") } diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 095fbb070e..dd2934df82 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -67,7 +67,7 @@ func (d *DeployerTestEnv) Close() error { } func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun { - run := &DeployTestRun{dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}} + run := &DeployTestRun{t: d.t, dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}} for _, o := range options { o(run) } @@ -75,6 +75,7 @@ func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun } type DeployTestRun struct { + t testing.TB dockerClient *client.Client deployerImage string @@ -86,6 +87,8 @@ type DeployTestRun struct { gitRepo string gitRef string + region string + noCustomize bool skipExtensions bool copyConfig bool @@ -125,6 +128,12 @@ func WithGitRef(ref string) func(*DeployTestRun) { } } +func WithRegion(region string) func(*DeployTestRun) { + return func(d *DeployTestRun) { + d.region = region + } +} + func WithoutCustomize(d *DeployTestRun) { d.noCustomize = true } @@ -167,6 +176,10 @@ func (d *DeployTestRun) Start(ctx context.Context) error { env = append(env, fmt.Sprintf("GIT_REF=%s", d.gitRef)) } + if d.region != "" { + env = append(env, fmt.Sprintf("DEPLOY_APP_REGION=%s", d.region)) + } + if d.noCustomize { env = append(env, "NO_DEPLOY_CUSTOMIZE=1") } @@ -323,6 +336,10 @@ func (d *DeployTestRun) Start(ctx context.Context) error { err = we } } + + if d.err == nil && d.exitCode == 0 { + d.checkAssertions() + } }() return nil @@ -355,6 +372,15 @@ func (d *DeployTestRun) Close() error { }) } +func (d 
*DeployTestRun) checkAssertions() { + meta, err := d.out.ArtifactMeta() + require.NoError(d.t, err) + + stepNames := append([]string{"__root__"}, meta.StepNames()...) + + require.Equal(d.t, d.out.Steps, stepNames) +} + type log struct { stream uint8 data []byte From ef780b9475c925816af7898463b0c5e8ae5973c4 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 18 Sep 2024 16:08:21 -0400 Subject: [PATCH 050/104] refactor deployer tests to make them more succinct --- test/deployer/deployer_test.go | 157 ++++++++++++++++----------------- test/testlib/deployer.go | 68 +++++++------- 2 files changed, 110 insertions(+), 115 deletions(-) diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 24e490fb39..c66cc67a87 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -13,100 +13,92 @@ import ( ) func TestDeployBasicNode(t *testing.T) { - ctx := context.TODO() - - d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) - require.NoError(t, err) - - defer d.Close() - - err = testlib.CopyFixtureIntoWorkDir(d.WorkDir(), "deploy-node") - require.NoError(t, err) - - flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) - - appName := d.CreateRandomAppName() - require.NotEmpty(t, appName) - - err = testlib.OverwriteConfig(flyTomlPath, map[string]any{ - "app": appName, - "region": d.PrimaryRegion(), - "env": map[string]string{ - "TEST_ID": d.ID(), - }, - }) + deploy := testDeployer(t, withFixtureApp("deploy-node"), createRandomApp, withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), testlib.DeployOnly, testlib.DeployNow, withWorkDirAppSource) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) require.NoError(t, err) - // app required - d.Fly("apps create %s -o %s", appName, d.OrgSlug()) - - deploy := d.NewRun(testlib.DeployOnly, testlib.DeployNow, testlib.WithAppSource(d.WorkDir())) - - defer deploy.Close() - - err = deploy.Start(ctx) - - require.Nil(t, err) - - _, err = deploy.Wait() - require.Nil(t, err) + require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) +} - require.Zero(t, deploy.ExitCode()) +func TestLaunchBasicNode(t *testing.T) { + deploy := testDeployer(t, withFixtureApp("deploy-node"), withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": "dummy-app-name", + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), createRandomApp, testlib.WithCopyConfig, testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, withWorkDirAppSource) + + appName := deploy.Extra["appName"].(string) body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) require.NoError(t, err) - require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", d.ID())) + require.Contains(t, string(body), fmt.Sprintf("Hello, World! 
%s", deploy.Extra["TEST_ID"].(string))) } -func TestLaunchBasicNode(t *testing.T) { - ctx := context.TODO() - - d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) - require.NoError(t, err) +func TestLaunchGoFromRepo(t *testing.T) { + deploy := testDeployer(t, createRandomApp, testlib.WithRegion("yyz"), testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithGitRepo("https://github.com/fly-apps/go-example")) - defer d.Close() + appName := deploy.Extra["appName"].(string) - err = testlib.CopyFixtureIntoWorkDir(d.WorkDir(), "deploy-node") + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) require.NoError(t, err) - flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) + require.Contains(t, string(body), "I'm running in the yyz region") +} +func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() - require.NotEmpty(t, appName) - - err = testlib.OverwriteConfig(flyTomlPath, map[string]any{ - "app": "dummy-app-name", - "region": d.PrimaryRegion(), - "env": map[string]string{ - "TEST_ID": d.ID(), - }, - }) - require.NoError(t, err) + require.NotEmpty(d, appName) - // app required d.Fly("apps create %s -o %s", appName, d.OrgSlug()) + d.Extra["appName"] = appName - deploy := d.NewRun(testlib.WithApp(appName), testlib.WithCopyConfig, testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithAppSource(d.WorkDir())) - - defer deploy.Close() - - err = deploy.Start(ctx) - - require.Nil(t, err) - - _, err = deploy.Wait() - require.Nil(t, err) + testlib.WithApp(appName)(d) +} - require.Zero(t, deploy.ExitCode()) +func withFixtureApp(name string) func(*testlib.DeployTestRun) { + return func(d *testlib.DeployTestRun) { + err := testlib.CopyFixtureIntoWorkDir(d.WorkDir(), name) + require.NoError(d, err) + } +} - body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) - require.NoError(t, err) +func withOverwrittenConfig(raw any) func(*testlib.DeployTestRun) { + return func(d *testlib.DeployTestRun) { + flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) + data := make(map[string]any) + switch cast := raw.(type) { + case map[string]any: + data = cast + case func(*testlib.DeployTestRun) map[string]any: + data = cast(d) + default: + fmt.Println(cast) + d.Fatal("failed to cast template data") + } + err := testlib.OverwriteConfig(flyTomlPath, data) + require.NoError(d, err) + } +} - require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", d.ID())) +func withWorkDirAppSource(d *testlib.DeployTestRun) { + testlib.WithAppSource(d.WorkDir())(d) } -func TestLaunchGoFromRepo(t *testing.T) { +func testDeployer(t *testing.T, options ...func(*testlib.DeployTestRun)) *testlib.DeployTestRun { ctx := context.TODO() d, err := testlib.NewDeployerTestEnvFromEnv(ctx, t) @@ -114,27 +106,28 @@ func TestLaunchGoFromRepo(t *testing.T) { defer d.Close() - appName := d.CreateRandomAppName() - require.NotEmpty(t, appName) - - // app required - d.Fly("apps create %s -o %s", appName, d.OrgSlug()) - - deploy := d.NewRun(testlib.WithApp(appName), testlib.WithRegion("yyz"), testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithGitRepo("https://github.com/fly-apps/go-example")) - + deploy := d.NewRun(options...) 
defer deploy.Close() + deploy.Extra["TEST_ID"] = d.ID() + err = deploy.Start(ctx) require.Nil(t, err) - _, err = deploy.Wait() + err = deploy.Wait() require.Nil(t, err) require.Zero(t, deploy.ExitCode()) - body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + out := deploy.Output() + + meta, err := out.ArtifactMeta() require.NoError(t, err) - require.Contains(t, string(body), "I'm running in the yyz region") + stepNames := append([]string{"__root__"}, meta.StepNames()...) + + require.Equal(t, out.Steps, stepNames) + + return deploy } diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index dd2934df82..8717520583 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -67,7 +67,7 @@ func (d *DeployerTestEnv) Close() error { } func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun { - run := &DeployTestRun{t: d.t, dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}} + run := &DeployTestRun{FlyctlTestEnv: d.FlyctlTestEnv, dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}, Extra: make(map[string]interface{})} for _, o := range options { o(run) } @@ -75,7 +75,7 @@ func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun } type DeployTestRun struct { - t testing.TB + *FlyctlTestEnv dockerClient *client.Client deployerImage string @@ -108,6 +108,8 @@ type DeployTestRun struct { done bool out *DeployerOut err error + + Extra map[string]interface{} } func WithApp(app string) func(*DeployTestRun) { @@ -336,27 +338,23 @@ func (d *DeployTestRun) Start(ctx context.Context) error { err = we } } - - if d.err == nil && d.exitCode == 0 { - d.checkAssertions() - } }() return nil } -func (d *DeployTestRun) Wait() (*DeployerOut, error) { +func (d *DeployTestRun) Wait() error { if d.done { if d.err != nil { - return nil, d.err + return d.err } - return d.out, nil + return nil } select { - case out := <-d.waitCh: - return out, nil + case <-d.waitCh: + return nil case err := <-d.waitErrCh: - return nil, err + return err } } @@ -364,6 +362,10 @@ func (d *DeployTestRun) ExitCode() int64 { return d.exitCode } +func (d *DeployTestRun) Output() *DeployerOut { + return d.out +} + func (d *DeployTestRun) Close() error { return d.dockerClient.ContainerRemove(context.TODO(), d.containerID, container.RemoveOptions{ RemoveVolumes: true, @@ -372,15 +374,6 @@ func (d *DeployTestRun) Close() error { }) } -func (d *DeployTestRun) checkAssertions() { - meta, err := d.out.ArtifactMeta() - require.NoError(d.t, err) - - stepNames := append([]string{"__root__"}, meta.StepNames()...) 
- - require.Equal(d.t, d.out.Steps, stepNames) -} - type log struct { stream uint8 data []byte @@ -400,18 +393,6 @@ type Step struct { Description string `json:"description"` } -type ArtifactMeta struct { - Steps []Step `json:"steps"` -} - -func (m *ArtifactMeta) StepNames() []string { - stepNames := make([]string, len(m.Steps)) - for i, step := range m.Steps { - stepNames[i] = step.ID - } - return stepNames -} - type DeployerOut struct { Messages []Message Steps []string @@ -426,3 +407,24 @@ func (out *DeployerOut) ArtifactMeta() (*ArtifactMeta, error) { } return &meta, nil } + +type ArtifactMeta struct { + Steps []Step `json:"steps"` +} + +func (m *ArtifactMeta) StepNames() []string { + stepNames := make([]string, len(m.Steps)) + for i, step := range m.Steps { + stepNames[i] = step.ID + } + return stepNames +} + +// func (out *DeployerOut) ArtifactManifest() (*launch.LaunchManifest, error) { +// var manifest launch.LaunchManifest +// err := json.Unmarshal(out.Artifacts["manifest"], &manifest) +// if err != nil { +// return nil, err +// } +// return &manifest, nil +// } From cd59492e73e78988ee329bb01a57a39e8b71f1ee Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 25 Sep 2024 10:26:13 -0400 Subject: [PATCH 051/104] more readable tests --- test/deployer/deployer_test.go | 61 +++++++++++++++++++++++----------- test/testlib/deployer.go | 17 +++++----- 2 files changed, 51 insertions(+), 27 deletions(-) diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index c66cc67a87..e0e7b33b4d 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -13,15 +13,22 @@ import ( ) func TestDeployBasicNode(t *testing.T) { - deploy := testDeployer(t, withFixtureApp("deploy-node"), createRandomApp, withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { - return map[string]any{ - "app": d.Extra["appName"], - "region": d.PrimaryRegion(), - "env": map[string]string{ - "TEST_ID": d.ID(), - }, - } - }), testlib.DeployOnly, testlib.DeployNow, withWorkDirAppSource) + deploy := testDeployer(t, + withFixtureApp("deploy-node"), + createRandomApp, + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + testlib.DeployOnly, + testlib.DeployNow, + withWorkDirAppSource, + ) body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) require.NoError(t, err) @@ -30,15 +37,24 @@ func TestDeployBasicNode(t *testing.T) { } func TestLaunchBasicNode(t *testing.T) { - deploy := testDeployer(t, withFixtureApp("deploy-node"), withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { - return map[string]any{ - "app": "dummy-app-name", - "region": d.PrimaryRegion(), - "env": map[string]string{ - "TEST_ID": d.ID(), - }, - } - }), createRandomApp, testlib.WithCopyConfig, testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, withWorkDirAppSource) + deploy := testDeployer(t, + withFixtureApp("deploy-node"), + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": "dummy-app-name", + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + createRandomApp, + testlib.WithCopyConfig, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + ) appName := deploy.Extra["appName"].(string) @@ -49,7 
+65,14 @@ func TestLaunchBasicNode(t *testing.T) { } func TestLaunchGoFromRepo(t *testing.T) { - deploy := testDeployer(t, createRandomApp, testlib.WithRegion("yyz"), testlib.WithoutCustomize, testlib.WithouExtensions, testlib.DeployNow, testlib.WithGitRepo("https://github.com/fly-apps/go-example")) + deploy := testDeployer(t, + createRandomApp, + testlib.WithRegion("yyz"), + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + testlib.WithGitRepo("https://github.com/fly-apps/go-example"), + ) appName := deploy.Extra["appName"].(string) diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 8717520583..b69e961804 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -21,6 +21,7 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/require" + "github.com/superfly/flyctl/internal/command/launch" ) type DeployerTestEnv struct { @@ -420,11 +421,11 @@ func (m *ArtifactMeta) StepNames() []string { return stepNames } -// func (out *DeployerOut) ArtifactManifest() (*launch.LaunchManifest, error) { -// var manifest launch.LaunchManifest -// err := json.Unmarshal(out.Artifacts["manifest"], &manifest) -// if err != nil { -// return nil, err -// } -// return &manifest, nil -// } +func (out *DeployerOut) ArtifactManifest() (*launch.LaunchManifest, error) { + var manifest launch.LaunchManifest + err := json.Unmarshal(out.Artifacts["manifest"], &manifest) + if err != nil { + return nil, err + } + return &manifest, nil +} From b032feb8076c481d15b38986101abbf1defd052f Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 27 Sep 2024 11:34:19 -0400 Subject: [PATCH 052/104] fix node scanner --- deploy.rb | 9 ++----- scanner/node.go | 3 +++ test/deployer/deployer_test.go | 26 ++++++++++++++++++- .../deploy-node-no-dockerfile/index.js | 13 ++++++++++ .../package-lock.json | 12 +++++++++ .../deploy-node-no-dockerfile/package.json | 8 ++++++ .../deploy-node-no-dockerfile/somefile | 0 test/testlib/helpers.go | 13 ++++++++-- 8 files changed, 74 insertions(+), 10 deletions(-) create mode 100644 test/fixtures/deploy-node-no-dockerfile/index.js create mode 100644 test/fixtures/deploy-node-no-dockerfile/package-lock.json create mode 100644 test/fixtures/deploy-node-no-dockerfile/package.json create mode 100644 test/fixtures/deploy-node-no-dockerfile/somefile diff --git a/deploy.rb b/deploy.rb index b4a147c773..7bfe7761fa 100755 --- a/deploy.rb +++ b/deploy.rb @@ -87,9 +87,6 @@ HAS_FLY_CONFIG = Dir.entries(".").any? { |f| File.fnmatch('fly.{toml,json,yaml,yml}', f, File::FNM_EXTGLOB)} -# -c arg if any -conf_arg = "" - if !DEPLOY_ONLY MANIFEST_PATH = "/tmp/manifest.json" @@ -222,15 +219,13 @@ end end - # Write the fly config file to a tmp directory - File.write("/tmp/fly.json", manifest["config"].to_json) - conf_arg = "-c /tmp/fly.json" - ORG_SLUG = manifest["plan"]["org"] APP_REGION = manifest["plan"]["region"] DO_GEN_REQS = !DEPLOY_COPY_CONFIG || !HAS_FLY_CONFIG + debug("generate reqs? 
#{DO_GEN_REQS}") + FLY_PG = manifest.dig("plan", "postgres", "fly_postgres") SUPABASE = manifest.dig("plan", "postgres", "supabase_postgres") UPSTASH = manifest.dig("plan", "redis", "upstash_redis") diff --git a/scanner/node.go b/scanner/node.go index f51783a54a..85804b2353 100644 --- a/scanner/node.go +++ b/scanner/node.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/command/launch/plan" ) func configureNode(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { @@ -177,5 +178,7 @@ Now: run 'fly deploy' to deploy your Node app. s.Env = env + s.Runtime = plan.RuntimeStruct{Language: "node", Version: nodeVersion} + return s, nil } diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index e0e7b33b4d..f2bf591b51 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -36,7 +36,7 @@ func TestDeployBasicNode(t *testing.T) { require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) } -func TestLaunchBasicNode(t *testing.T) { +func TestLaunchBasicNodeWithDockerfile(t *testing.T) { deploy := testDeployer(t, withFixtureApp("deploy-node"), withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { @@ -64,6 +64,30 @@ func TestLaunchBasicNode(t *testing.T) { require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) } +func TestLaunchBasicNode(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-node-no-dockerfile"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, manifest.Plan.Runtime.Language, "node") + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Equal(t, string(body), "Hello, World!") +} + func TestLaunchGoFromRepo(t *testing.T) { deploy := testDeployer(t, createRandomApp, diff --git a/test/fixtures/deploy-node-no-dockerfile/index.js b/test/fixtures/deploy-node-no-dockerfile/index.js new file mode 100644 index 0000000000..ee281aeee0 --- /dev/null +++ b/test/fixtures/deploy-node-no-dockerfile/index.js @@ -0,0 +1,13 @@ +const http = require('http'); + +http.createServer((request, response) => { + response.writeHead(200, + { + 'Content-Type': 'text/plain' + } + ); + + response.write("Hello, World!"); + response.end(); + +}).listen(8080); diff --git a/test/fixtures/deploy-node-no-dockerfile/package-lock.json b/test/fixtures/deploy-node-no-dockerfile/package-lock.json new file mode 100644 index 0000000000..44a78d8204 --- /dev/null +++ b/test/fixtures/deploy-node-no-dockerfile/package-lock.json @@ -0,0 +1,12 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "hello-node", + "version": "1.0.0" + } + } +} diff --git a/test/fixtures/deploy-node-no-dockerfile/package.json b/test/fixtures/deploy-node-no-dockerfile/package.json new file mode 100644 index 0000000000..9a1d0260d5 --- /dev/null +++ b/test/fixtures/deploy-node-no-dockerfile/package.json @@ -0,0 +1,8 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "start": "node index.js" + } +} diff --git a/test/fixtures/deploy-node-no-dockerfile/somefile 
b/test/fixtures/deploy-node-no-dockerfile/somefile new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/testlib/helpers.go b/test/testlib/helpers.go index 63cb15e424..9b0c5cfa9a 100644 --- a/test/testlib/helpers.go +++ b/test/testlib/helpers.go @@ -232,6 +232,7 @@ func OverwriteConfig(path string, data map[string]any) error { if err != nil { return err } + // fmt.Printf("CONFIG @ %s: %v\n", path, cfg) cfgEnv, err := castEnv(cfg["env"]) if err != nil { @@ -248,9 +249,17 @@ func OverwriteConfig(path string, data map[string]any) error { cfgEnv[k] = v } - cfg["app"] = data["app"] + if app, ok := data["app"]; ok { + cfg["app"] = app + } + cfg["env"] = cfgEnv - cfg["primary_region"] = data["region"] + + if region, ok := data["region"]; ok { + cfg["primary_region"] = region + } + + // fmt.Printf("FINAL CONFIG: %v\n", cfg) err = writeToml(path, cfg) if err != nil { From 1434718bf880de1754a312c82a740c2da3aab891 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 27 Sep 2024 12:45:16 -0400 Subject: [PATCH 053/104] possibly fix weird setup where override fly.toml should clear a templated value --- test/testlib/helpers.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/testlib/helpers.go b/test/testlib/helpers.go index 9b0c5cfa9a..efd0e8dfac 100644 --- a/test/testlib/helpers.go +++ b/test/testlib/helpers.go @@ -257,6 +257,8 @@ func OverwriteConfig(path string, data map[string]any) error { if region, ok := data["region"]; ok { cfg["primary_region"] = region + } else if v, ok := cfg["primary_region"].(string); ok && strings.HasPrefix(v, "{{") { + delete(cfg, "primary_region") } // fmt.Printf("FINAL CONFIG: %v\n", cfg) From 7fc099ce56371dc0d8a6cd393a2e4becca22581b Mon Sep 17 00:00:00 2001 From: Lubien Date: Mon, 30 Sep 2024 09:00:43 -0300 Subject: [PATCH 054/104] Deployer: open pr on launch (#3953) * Create and push branch when deployer has DEPLOY_CREATE_AND_PUSH_BRANCH * CREATE_AND_PUSH_BRANCH goes after deploy * Do not create branch unless GIT_REPO * go mod tidy * Push force * This should come last * Failsafe when there's no diff to commit --- deploy.rb | 19 +++++++++++++++++++ deploy/common.rb | 1 + go.mod | 5 ----- go.sum | 12 ------------ scanner/scanner.go | 4 +++- 5 files changed, 23 insertions(+), 18 deletions(-) diff --git a/deploy.rb b/deploy.rb index 7bfe7761fa..affcc8313a 100755 --- a/deploy.rb +++ b/deploy.rb @@ -10,6 +10,8 @@ DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? DEPLOY_CUSTOMIZE = !get_env("NO_DEPLOY_CUSTOMIZE") DEPLOY_ONLY = !get_env("DEPLOY_ONLY").nil? +CREATE_AND_PUSH_BRANCH = !get_env("DEPLOY_CREATE_AND_PUSH_BRANCH").nil? 
+FLYIO_BRANCH_NAME = "flyio-new-files" DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") if !DEPLOY_CUSTOMIZE && !DEPLOY_APP_NAME @@ -29,6 +31,8 @@ GIT_REPO = get_env("GIT_REPO") +CAN_CREATE_AND_PUSH_BRANCH = CREATE_AND_PUSH_BRANCH && GIT_REPO + GIT_REPO_URL = if GIT_REPO repo_url = begin URI(GIT_REPO) @@ -242,6 +246,10 @@ steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW + if CAN_CREATE_AND_PUSH_BRANCH + steps.push({id: Step::CREATE_AND_PUSH_BRANCH, description: "Create Fly.io git branch with new files"}) + end + artifact Artifact::META, { steps: steps } # Join the parallel task thread @@ -378,4 +386,15 @@ end end +if CAN_CREATE_AND_PUSH_BRANCH + in_step Step::CREATE_AND_PUSH_BRANCH do + exec_capture("git checkout -b #{FLYIO_BRANCH_NAME}") + exec_capture("git config user.name \"Fly.io\"") + exec_capture("git config user.email \"noreply@fly.io\"") + exec_capture("git add .") + exec_capture("git commit -m \"New files from Fly.io Launch\" || echo \"No changes to commit\"") + exec_capture("git push -f origin #{FLYIO_BRANCH_NAME}") + end +end + event :end, { ts: ts() } \ No newline at end of file diff --git a/deploy/common.rb b/deploy/common.rb index eea2436024..5ed9a03f41 100644 --- a/deploy/common.rb +++ b/deploy/common.rb @@ -20,6 +20,7 @@ module Step UPSTASH_REDIS = :upstash_redis TIGRIS_OBJECT_STORAGE = :tigris_object_storage SENTRY = :sentry + CREATE_AND_PUSH_BRANCH = :create_and_push_branch DEPLOY = :deploy def self.current diff --git a/go.mod b/go.mod index 8feba93975..023fb58a17 100644 --- a/go.mod +++ b/go.mod @@ -155,7 +155,6 @@ require ( github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect github.com/containerd/ttrpc v1.2.3 // indirect github.com/containerd/typeurl/v2 v2.1.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -229,7 +228,6 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 // indirect github.com/rivo/uniseg v0.4.3 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect @@ -238,7 +236,6 @@ require ( github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.2.2 // indirect - github.com/sosodev/duration v1.3.1 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect @@ -246,7 +243,6 @@ require ( github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect - github.com/urfave/cli/v2 v2.27.4 // indirect github.com/vbatts/tar-split v0.11.5 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -254,7 +250,6 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // 
indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect go.opentelemetry.io/otel/metric v1.30.0 // indirect diff --git a/go.sum b/go.sum index 3948e156f1..550e371b72 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,6 @@ connectrpc.com/connect v1.16.1 h1:rOdrK/RTI/7TVnn3JsVxt3n028MlTRwmK5Q4heSpjis= connectrpc.com/connect v1.16.1/go.mod h1:XpZAduBQUySsb4/KO5JffORVkDI4B6/EYPi7N8xpNZw= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/99designs/gqlgen v0.17.53 h1:FJOJaF96d7Y5EBpoaLG96fz1NR6B8bFdCZI1yZwYArM= -github.com/99designs/gqlgen v0.17.53/go.mod h1:77/+pVe6zlTsz++oUg2m8VLgzdUPHxjoAG3BxI5y8Rc= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= @@ -208,7 +206,6 @@ github.com/containerd/ttrpc v1.2.3 h1:4jlhbXIGvijRtNC8F/5CpuJZ7yKOBFGFOOXg1bkISz github.com/containerd/ttrpc v1.2.3/go.mod h1:ieWsXucbb8Mj9PH0rXCw1i8IunRbbAiDkpXkbfflWBM= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -571,8 +568,6 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= @@ -603,8 +598,6 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= -github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4= -github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod 
h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= @@ -659,9 +652,6 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 h1:Y/M5lygoNPKwVNLMPXgVfsRT40CSFKXCxuU8LoHySjs= github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= -github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= -github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= -github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= @@ -679,8 +669,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= -github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= diff --git a/scanner/scanner.go b/scanner/scanner.go index 23a686c786..d2ef81ed38 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -3,6 +3,7 @@ package scanner import ( "embed" "io/fs" + "os" "path/filepath" "strings" "text/template" @@ -141,7 +142,8 @@ func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { if err != nil { return nil, err } - if si != nil { + optOutGithubActions := os.Getenv("OPT_OUT_GITHUB_ACTIONS") + if si != nil && optOutGithubActions == "" { github_actions(sourceDir, &si.GitHubActions) return si, nil } From 30c2e949cd7c4b311dab2fac20f5807898d2c64f Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 12:23:45 -0400 Subject: [PATCH 055/104] bomb if plan propose doesn't detect anything --- internal/command/launch/plan_commands.go | 5 +++++ internal/command/launch/sourceinfo.go | 13 ++++++++++++ test/deployer/deployer_test.go | 25 ++++++++++++++++++++++++ test/testlib/deployer.go | 10 ++++++++++ 4 files changed, 53 insertions(+) diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index 6c6a33a4b0..da97e5116e 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -70,6 +70,11 @@ func newPropose() *cobra.Command { Default: "", Hidden: true, }, + flag.Bool{ + Name: "no-blank", + Description: "Don't allow a \"blank\" app (nothing could be detected)", + Default: true, + }, ) return cmd diff --git a/internal/command/launch/sourceinfo.go b/internal/command/launch/sourceinfo.go index 
798aff776f..227f5061ce 100644 --- a/internal/command/launch/sourceinfo.go +++ b/internal/command/launch/sourceinfo.go @@ -2,7 +2,9 @@ package launch import ( "context" + "errors" "fmt" + "os" "regexp" "strings" @@ -76,6 +78,17 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC if srcInfo == nil { fmt.Fprintln(io.Out, aurora.Green("Could not find a Dockerfile, nor detect a runtime or framework from source code. Continuing with a blank app.")) + if flag.GetBool(ctx, "no-blank") { + entries, err := os.ReadDir("./") + if err == nil { + // TODO: probably remove this... + fmt.Fprintln(io.Out, "are you in the right directory? current directory listing:") + for _, e := range entries { + fmt.Fprintln(io.Out, e.Name()) + } + } + return nil, nil, errors.New("could not detect runtime or Dockerfile") + } return srcInfo, nil, err } diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index f2bf591b51..e596f8c409 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -88,6 +88,31 @@ func TestLaunchBasicNode(t *testing.T) { require.Equal(t, string(body), "Hello, World!") } +func TestLaunchNodeAppDifferentStructure(t *testing.T) { + deploy := testDeployer(t, + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.WithCopyConfig, + testlib.DeployNow, + testlib.WithGitRepo("https://github.com/akarin-sensei/hello-fly-private-empty.git"), + testlib.WithGitRef("f6e808fbf9c6fea408b18b2e53b43c37e4c7f57f"), + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, manifest.Plan.Runtime.Language, "node") + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Equal(t, string(body), "Hello, World!") +} + func TestLaunchGoFromRepo(t *testing.T) { deploy := testDeployer(t, createRandomApp, diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index b69e961804..4fe05c1b35 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -97,6 +97,8 @@ type DeployTestRun struct { deployOnly bool deployNow bool + createAndPushBranch bool + containerBinds []string containerID string @@ -157,6 +159,10 @@ func DeployNow(d *DeployTestRun) { d.deployNow = true } +func CreateAndPushBranch(d *DeployTestRun) { + d.createAndPushBranch = true +} + func WithAppSource(src string) func(*DeployTestRun) { return func(d *DeployTestRun) { d.containerBinds = append(d.containerBinds, fmt.Sprintf("%s:/usr/src/app", src)) @@ -200,6 +206,10 @@ func (d *DeployTestRun) Start(ctx context.Context) error { env = append(env, "DEPLOY_NOW=1") } + if d.createAndPushBranch { + env = append(env, "DEPLOY_CREATE_AND_PUSH_BRANCH=1") + } + fmt.Printf("creating container... 
image=%s\n", d.deployerImage) cont, err := d.dockerClient.ContainerCreate(ctx, &container.Config{ Image: d.deployerImage, From 717395f035ee73618694316c43e37e4554d8f271 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 12:40:03 -0400 Subject: [PATCH 056/104] fix color output for error message when no framework is detected --- internal/command/launch/sourceinfo.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/internal/command/launch/sourceinfo.go b/internal/command/launch/sourceinfo.go index 227f5061ce..c3acfbb802 100644 --- a/internal/command/launch/sourceinfo.go +++ b/internal/command/launch/sourceinfo.go @@ -77,8 +77,15 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC } if srcInfo == nil { - fmt.Fprintln(io.Out, aurora.Green("Could not find a Dockerfile, nor detect a runtime or framework from source code. Continuing with a blank app.")) - if flag.GetBool(ctx, "no-blank") { + var colorFn func(arg interface{}) aurora.Value + noBlank := flag.GetBool(ctx, "no-blank") + if noBlank { + colorFn = aurora.Red + } else { + colorFn = aurora.Green + } + fmt.Fprintln(io.Out, colorFn("Could not find a Dockerfile, nor detect a runtime or framework from source code. Continuing with a blank app.")) + if noBlank { entries, err := os.ReadDir("./") if err == nil { // TODO: probably remove this... From 3b939c0c2d9b974a96be6a61478351c4cf9abe32 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 12:44:47 -0400 Subject: [PATCH 057/104] even if opting out of gha, return the source info --- scanner/scanner.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scanner/scanner.go b/scanner/scanner.go index d2ef81ed38..68147d75b7 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -143,8 +143,10 @@ func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { return nil, err } optOutGithubActions := os.Getenv("OPT_OUT_GITHUB_ACTIONS") - if si != nil && optOutGithubActions == "" { - github_actions(sourceDir, &si.GitHubActions) + if si != nil { + if optOutGithubActions == "" { + github_actions(sourceDir, &si.GitHubActions) + } return si, nil } } From 811cface4c3f89840a0904d2be32c07027055db3 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 13:08:02 -0400 Subject: [PATCH 058/104] test for opt out gha --- test/deployer/deployer_test.go | 26 +------------------------- test/testlib/deployer.go | 8 ++++++++ 2 files changed, 9 insertions(+), 25 deletions(-) diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index e596f8c409..b7111aaf6b 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -70,6 +70,7 @@ func TestLaunchBasicNode(t *testing.T) { createRandomApp, testlib.WithoutCustomize, testlib.WithouExtensions, + testlib.OptOutGithubActions, testlib.DeployNow, withWorkDirAppSource, ) @@ -88,31 +89,6 @@ func TestLaunchBasicNode(t *testing.T) { require.Equal(t, string(body), "Hello, World!") } -func TestLaunchNodeAppDifferentStructure(t *testing.T) { - deploy := testDeployer(t, - createRandomApp, - testlib.WithoutCustomize, - testlib.WithouExtensions, - testlib.WithCopyConfig, - testlib.DeployNow, - testlib.WithGitRepo("https://github.com/akarin-sensei/hello-fly-private-empty.git"), - testlib.WithGitRef("f6e808fbf9c6fea408b18b2e53b43c37e4c7f57f"), - ) - - manifest, err := deploy.Output().ArtifactManifest() - require.NoError(t, err) - require.NotNil(t, manifest) - - require.Equal(t, 
manifest.Plan.Runtime.Language, "node") - - appName := deploy.Extra["appName"].(string) - - body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) - require.NoError(t, err) - - require.Equal(t, string(body), "Hello, World!") -} - func TestLaunchGoFromRepo(t *testing.T) { deploy := testDeployer(t, createRandomApp, diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 4fe05c1b35..8f85c564fb 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -93,6 +93,7 @@ type DeployTestRun struct { noCustomize bool skipExtensions bool copyConfig bool + optOutGha bool deployOnly bool deployNow bool @@ -151,6 +152,10 @@ func WithCopyConfig(d *DeployTestRun) { d.copyConfig = true } +func OptOutGithubActions(d *DeployTestRun) { + d.optOutGha = true +} + func DeployOnly(d *DeployTestRun) { d.deployOnly = true } @@ -198,6 +203,9 @@ func (d *DeployTestRun) Start(ctx context.Context) error { if d.copyConfig { env = append(env, "DEPLOY_COPY_CONFIG=1") } + if d.optOutGha { + env = append(env, "OPT_OUT_GITHUB_ACTIONS=1") + } if d.deployOnly { env = append(env, "DEPLOY_ONLY=1") From ff17b7d2b86346453883ac444248b997008e08d4 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 13:18:02 -0400 Subject: [PATCH 059/104] download correct default node.js to save some time --- deployer.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 9aa442051c..4b3e35eff2 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -20,7 +20,7 @@ RUN git config --global advice.detachedHead false && \ git config --global init.defaultBranch main ENV DEFAULT_RUBY_VERSION=3.1.6 \ - DEFAULT_NODE_VERSION=20.16.0 \ + DEFAULT_NODE_VERSION=18.16.0 \ DEFAULT_ERLANG_VERSION=26.2.5.2 \ DEFAULT_ELIXIR_VERSION=1.16 \ DEFAULT_BUN_VERSION=1.1.24 \ From 42e59fffcee7ce6b269fe2676c208039dd28b8d4 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 13:18:10 -0400 Subject: [PATCH 060/104] try to cache deployer build --- .github/workflows/build.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b3b0952303..c3bea27fb8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -40,13 +40,19 @@ jobs: registry: https://index.docker.io/v1/ username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - - name: Deployer docker build & push + - name: Prepare flyctl for deployer Docker build run: | mkdir -p bin cp dist/default_linux_amd64_v1/flyctl bin/flyctl chmod +x bin/flyctl - docker build -t flyio/deployer:${{ github.sha }} -f deployer.Dockerfile . 
- docker push flyio/deployer:${{ github.sha }} + - name: Build and push + uses: docker/build-push-action@v6 + with: + push: true + file: deployer.Dockerfile + tags: flyio/deployer:${{ github.sha }} + cache-from: type=gha + cache-to: type=gha,mode=max preflight: needs: test_build From fb0063d59e87904471d1867740443a458766a88e Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 13:27:01 -0400 Subject: [PATCH 061/104] setup buildx --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c3bea27fb8..ec5b74612a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,6 +34,8 @@ jobs: name: flyctl path: dist/default_linux_amd64_v1/flyctl overwrite: true + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: From f2379acec9f206b87b5f528ffc9b2e222912352a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 30 Sep 2024 13:45:53 -0400 Subject: [PATCH 062/104] use local context for build --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ec5b74612a..110ce9c1e2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -51,6 +51,7 @@ jobs: uses: docker/build-push-action@v6 with: push: true + context: . file: deployer.Dockerfile tags: flyio/deployer:${{ github.sha }} cache-from: type=gha From a5e56bf113ac4f62e4632d120bb7a379488a1979 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 1 Oct 2024 09:37:31 -0400 Subject: [PATCH 063/104] Run deployer tests in separate workflow (#3977) * attempt at parallelizing deployer and preflight tests * deployer build on workflow_call, not push * job misnomer on deployer-tests * add matrix for deployer-tests * don't cancel-in-progress on deployer-tests * don't run preflight for now * build deployer image once * debug artifact download not downloading? * create bin directory to move file there... painful * bin/flyctl required inside the tests * not using depot might be worse after all * don't wait for go-releaser, just make build * rename workflow for deployer tests [ci skip] --- .github/workflows/build.yml | 28 ++---- .github/workflows/deployer-tests.yml | 124 +++++++++++++++++++++++++++ .github/workflows/preflight.yml | 3 +- deploy.rb | 2 +- scripts/deployer-tests.sh | 60 +++++++++++++ scripts/preflight.sh | 2 +- 6 files changed, 194 insertions(+), 25 deletions(-) create mode 100644 .github/workflows/deployer-tests.yml create mode 100755 scripts/deployer-tests.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 110ce9c1e2..b55ef9e2b8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,30 +34,14 @@ jobs: name: flyctl path: dist/default_linux_amd64_v1/flyctl overwrite: true - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: https://index.docker.io/v1/ - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - name: Prepare flyctl for deployer Docker build - run: | - mkdir -p bin - cp dist/default_linux_amd64_v1/flyctl bin/flyctl - chmod +x bin/flyctl - - name: Build and push - uses: docker/build-push-action@v6 - with: - push: true - context: . 
- file: deployer.Dockerfile - tags: flyio/deployer:${{ github.sha }} - cache-from: type=gha - cache-to: type=gha,mode=max preflight: needs: test_build uses: ./.github/workflows/preflight.yml secrets: inherit + + # deployer-tests: + # needs: test_build + # uses: ./.github/workflows/deployer-tests.yml + # secrets: inherit + diff --git a/.github/workflows/deployer-tests.yml b/.github/workflows/deployer-tests.yml new file mode 100644 index 0000000000..9d5b034017 --- /dev/null +++ b/.github/workflows/deployer-tests.yml @@ -0,0 +1,124 @@ +name: Deployer tests +on: + push: + +jobs: + build-deployer: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + check-latest: true + - name: "Build flyctl" + run: make build + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: https://index.docker.io/v1/ + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Upload flyctl for the deployer tests + uses: actions/upload-artifact@v4 + with: + name: deployer-flyctl + path: bin/flyctl + overwrite: true + + - name: Build and push + uses: docker/build-push-action@v6 + with: + push: true + context: . + file: deployer.Dockerfile + tags: flyio/deployer:${{ github.sha }} + cache-from: type=gha + cache-to: type=gha,mode=max + + deployer-tests: + needs: build-deployer + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + vm_size: [""] + parallelism: [10] + index: [0,1,2,3,4,5,6,7,8,9] + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + check-latest: true + - name: Get go version + id: go-version + run: echo "name=version::$(go env GOVERSION)" >> $GITHUB_OUTPUT + - name: Install gotesplit, set FLY_PREFLIGHT_TEST_APP_PREFIX + run: | + curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s + echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" + - uses: actions/download-artifact@v4 + with: + name: deployer-flyctl + path: deployer-bin + - name: Move flyctl binary to correct directory + run: | + mkdir -p bin + mv deployer-bin/flyctl bin/flyctl + chmod +x bin/flyctl + - name: Run deployer tests + id: deployer-tests + env: + FLY_PREFLIGHT_TEST_ACCESS_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} + FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight + # This VM size is only available in ORD. 
+ FLY_PREFLIGHT_TEST_FLY_REGIONS: ord + FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" + FLY_FORCE_TRACE: "true" + FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} + FLY_DEPLOYER_IMAGE: "flyio/deployer:${{ github.sha }}" + FLY_PREFLIGHT_TEST_APP_PREFIX: "deployertest" + run: | + export PATH=$PWD/bin:$PATH + echo -n failed= >> $GITHUB_OUTPUT + ./scripts/deployer-tests.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT + - name: Post failure to slack + if: ${{ github.ref == 'refs/heads/master' && failure() }} + uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.PREFLIGHT_SLACK_WEBHOOK_URL }} + SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK + with: + channel-id: 'C0790M2E0G2' + payload: | + { + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":sob: deployer tests failed: ${{ steps.deployer-tests.outputs.failed }} ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + - uses: superfly/flyctl-actions/setup-flyctl@master + if: always() + - name: Clean up any un-deleted deployer-tests apps + if: always() + env: + FLY_API_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} + FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight + FLY_PREFLIGHT_TEST_APP_PREFIX: "deployertest" + run: | + ./scripts/delete_preflight_apps.sh "$FLY_PREFLIGHT_TEST_APP_PREFIX" \ No newline at end of file diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index 40d7a8e558..f8db5fe4aa 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -51,7 +51,7 @@ jobs: FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" FLY_FORCE_TRACE: "true" FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} - FLY_DEPLOYER_IMAGE: "flyio/deployer:${{ github.sha }}" + FLY_PREFLIGHT_TEST_APP_PREFIX: "preflight" run: | export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT @@ -82,5 +82,6 @@ jobs: env: FLY_API_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight + FLY_PREFLIGHT_TEST_APP_PREFIX: "preflight" run: | ./scripts/delete_preflight_apps.sh "$FLY_PREFLIGHT_TEST_APP_PREFIX" diff --git a/deploy.rb b/deploy.rb index affcc8313a..3d52b6f658 100755 --- a/deploy.rb +++ b/deploy.rb @@ -279,7 +279,7 @@ else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --depot=false --push -a #{APP_NAME} --image-label #{image_tag}") + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag}") artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end diff --git a/scripts/deployer-tests.sh b/scripts/deployer-tests.sh new file mode 100755 index 0000000000..6d7969aeb3 --- /dev/null +++ b/scripts/deployer-tests.sh @@ -0,0 +1,60 @@ +#! /bin/bash +set -euo pipefail + +ref= +total= +index= +out= + +while getopts r:t:i:o: name +do + case "$name" in + r) + ref="$OPTARG" + ;; + t) + total="$OPTARG" + ;; + i) + index="$OPTARG" + ;; + o) + out="$OPTARG" + ;; + ?) + printf "Usage: %s: [-r REF] [-t TOTAL] [-i INDEX] [-o FILE]\n" $0 + exit 2 + ;; + esac +done + +shift $(($OPTIND - 1)) + +test_opts= +if [[ "$ref" != "refs/heads/master" ]]; then + test_opts=-short +fi + +test_log="$(mktemp)" +function finish { + rm "$test_log" +} +trap finish EXIT + +set +e + +gotesplit \ + -total "$total" \ + -index "$index" \ + github.com/superfly/flyctl/test/deployer/... 
\ + -- --tags=integration -v -timeout=10m $test_opts | tee "$test_log" +test_status=$? + +set -e + +if [[ -n "$out" ]]; then + awk '/^--- FAIL:/{ printf("%s ", $3) }' "$test_log" >> "$out" + echo >> "$out" +fi + +exit $test_status diff --git a/scripts/preflight.sh b/scripts/preflight.sh index 3e94fae4fc..abdbaf69b6 100755 --- a/scripts/preflight.sh +++ b/scripts/preflight.sh @@ -46,7 +46,7 @@ set +e gotesplit \ -total "$total" \ -index "$index" \ - github.com/superfly/flyctl/test/preflight/... github.com/superfly/flyctl/test/deployer/... \ + github.com/superfly/flyctl/test/preflight/... \ -- --tags=integration -v -timeout=10m $test_opts | tee "$test_log" test_status=$? From bc093bc8c45ef8b69c262a946cea18f56c918288 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 1 Oct 2024 12:10:11 -0400 Subject: [PATCH 064/104] adjust logs when not detecting runtime or Dockerfile --- internal/command/launch/sourceinfo.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/command/launch/sourceinfo.go b/internal/command/launch/sourceinfo.go index c3acfbb802..6a0b66d7c1 100644 --- a/internal/command/launch/sourceinfo.go +++ b/internal/command/launch/sourceinfo.go @@ -84,17 +84,21 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC } else { colorFn = aurora.Green } - fmt.Fprintln(io.Out, colorFn("Could not find a Dockerfile, nor detect a runtime or framework from source code. Continuing with a blank app.")) + msg := "Could not find a Dockerfile, nor detect a runtime or framework from source code." + if !noBlank { + msg += " Continuing with a blank app." + } + fmt.Fprintln(io.Out, colorFn(msg)) if noBlank { entries, err := os.ReadDir("./") if err == nil { // TODO: probably remove this... - fmt.Fprintln(io.Out, "are you in the right directory? current directory listing:") + fmt.Fprintln(io.Out, "Are you in the right directory? 
Current directory listing:") for _, e := range entries { fmt.Fprintln(io.Out, e.Name()) } } - return nil, nil, errors.New("could not detect runtime or Dockerfile") + return nil, nil, errors.New("Could not detect runtime or Dockerfile") } return srcInfo, nil, err } From 3057043bac9c5216961ea279d5cd75e15babcbb1 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 2 Oct 2024 09:49:01 -0400 Subject: [PATCH 065/104] synchronously write to stdout/stderr --- deploy.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy.rb b/deploy.rb index 3d52b6f658..9b8ba75599 100755 --- a/deploy.rb +++ b/deploy.rb @@ -1,5 +1,8 @@ #!/usr/bin/env ruby +$stdout.sync = true +$stderr.sync = true + require './deploy/common' event :start, { ts: ts() } From 9ec4879794bc97b55afa882dbf69f9dfdb5de4f8 Mon Sep 17 00:00:00 2001 From: Lubien Date: Mon, 7 Oct 2024 10:56:54 -0300 Subject: [PATCH 066/104] Custom config for deployer monorepos (#3976) * Allow custom fly config path * Add DEPLOYER_SOURCE_CWD * Do not use --config unless available * Move config file check to git pull step * Fix fly.toml path * Test custom CWD * Fix envs * Always check DEPLOYER_SOURCE_CWD regardless of git pull * Do not dupe steps --- deploy.rb | 30 ++++++-- test/deployer/deployer_test.go | 68 ++++++++++++++++++- .../.dockerignore | 6 ++ .../deploy-node-custom-config-path/Dockerfile | 39 +++++++++++ .../custom-fly-config.toml | 31 +++++++++ .../deploy-node-custom-config-path/index.js | 14 ++++ .../package-lock.json | 12 ++++ .../package.json | 8 +++ .../deploy-node-custom-config-path/somefile | 0 .../inner-repo/.dockerignore | 6 ++ .../inner-repo/Dockerfile | 39 +++++++++++ .../deploy-node-monorepo/inner-repo/fly.toml | 31 +++++++++ .../deploy-node-monorepo/inner-repo/index.js | 14 ++++ .../inner-repo/package-lock.json | 12 ++++ .../inner-repo/package.json | 8 +++ .../deploy-node-monorepo/inner-repo/somefile | 0 test/testlib/deployer.go | 11 ++- 17 files changed, 323 insertions(+), 6 deletions(-) create mode 100644 test/fixtures/deploy-node-custom-config-path/.dockerignore create mode 100644 test/fixtures/deploy-node-custom-config-path/Dockerfile create mode 100644 test/fixtures/deploy-node-custom-config-path/custom-fly-config.toml create mode 100644 test/fixtures/deploy-node-custom-config-path/index.js create mode 100644 test/fixtures/deploy-node-custom-config-path/package-lock.json create mode 100644 test/fixtures/deploy-node-custom-config-path/package.json create mode 100644 test/fixtures/deploy-node-custom-config-path/somefile create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/.dockerignore create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/Dockerfile create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/fly.toml create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/index.js create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/package-lock.json create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/package.json create mode 100644 test/fixtures/deploy-node-monorepo/inner-repo/somefile diff --git a/deploy.rb b/deploy.rb index 9b8ba75599..0f7ea8962c 100755 --- a/deploy.rb +++ b/deploy.rb @@ -16,6 +16,8 @@ CREATE_AND_PUSH_BRANCH = !get_env("DEPLOY_CREATE_AND_PUSH_BRANCH").nil? 
FLYIO_BRANCH_NAME = "flyio-new-files" +DEPLOYER_FLY_CONFIG_PATH = get_env("DEPLOYER_FLY_CONFIG_PATH") +DEPLOYER_SOURCE_CWD = get_env("DEPLOYER_SOURCE_CWD") DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") if !DEPLOY_CUSTOMIZE && !DEPLOY_APP_NAME event :error, { type: :validation, message: "missing app name" } @@ -92,7 +94,27 @@ end end -HAS_FLY_CONFIG = Dir.entries(".").any? { |f| File.fnmatch('fly.{toml,json,yaml,yml}', f, File::FNM_EXTGLOB)} +if !DEPLOYER_SOURCE_CWD.nil? + Dir.chdir(DEPLOYER_SOURCE_CWD) +end + +if !DEPLOYER_FLY_CONFIG_PATH.nil? && !File.exists?(DEPLOYER_FLY_CONFIG_PATH) + event :error, { type: :validation, message: "Config file #{DEPLOYER_FLY_CONFIG_PATH} does not exist" } + exit 1 +end + +FLY_CONFIG_PATH = if !DEPLOYER_FLY_CONFIG_PATH.nil? + DEPLOYER_FLY_CONFIG_PATH +else + Dir.entries(".").find { |f| File.fnmatch('fly.{toml,json,yaml,yml}', f, File::FNM_EXTGLOB)} +end +HAS_FLY_CONFIG = !FLY_CONFIG_PATH.nil? + +CONFIG_COMMAND_STRING = if HAS_FLY_CONFIG + "--config #{FLY_CONFIG_PATH}" +else + "" +end if !DEPLOY_ONLY MANIFEST_PATH = "/tmp/manifest.json" @@ -271,7 +293,7 @@ end # TODO: better error if missing config -fly_config = manifest && manifest.dig("config") || JSON.parse(exec_capture("flyctl config show --local", log: false)) +fly_config = manifest && manifest.dig("config") || JSON.parse(exec_capture("flyctl config show --local #{CONFIG_COMMAND_STRING}", log: false)) APP_NAME = DEPLOY_APP_NAME || fly_config["app"] image_ref = in_step Step::BUILD do @@ -282,7 +304,7 @@ else image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag}") + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag} #{CONFIG_COMMAND_STRING}") artifact Artifact::DOCKER_IMAGE, { ref: image_ref } image_ref end @@ -385,7 +407,7 @@ if DEPLOY_NOW in_step Step::DEPLOY do - exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref}") + exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref} #{CONFIG_COMMAND_STRING}") end end diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index b7111aaf6b..48f1a98233 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -36,6 +36,56 @@ func TestDeployBasicNode(t *testing.T) { require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) } +func TestDeployBasicNodeWithCustomConfigPath(t *testing.T) { + deploy := testDeployer(t, + withCustomFlyTomlPath("custom-fly-config.toml"), + withFixtureApp("deploy-node-custom-config-path"), + createRandomApp, + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + testlib.DeployOnly, + testlib.DeployNow, + withWorkDirAppSource, + ) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) + require.NoError(t, err) + + require.Contains(t, string(body), fmt.Sprintf("Hello, World! 
%s", deploy.Extra["TEST_ID"].(string))) +} + +func TestDeployBasicNodeMonorepo(t *testing.T) { + deploy := testDeployer(t, + withCustomCwd("inner-repo"), + withFixtureApp("deploy-node-monorepo"), + createRandomApp, + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + testlib.DeployOnly, + testlib.DeployNow, + withWorkDirAppSource, + ) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) + require.NoError(t, err) + + require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) +} + func TestLaunchBasicNodeWithDockerfile(t *testing.T) { deploy := testDeployer(t, withFixtureApp("deploy-node"), @@ -124,9 +174,25 @@ func withFixtureApp(name string) func(*testlib.DeployTestRun) { } } +func withCustomFlyTomlPath(name string) func(*testlib.DeployTestRun) { + return func(d *testlib.DeployTestRun) { + d.FlyTomlPath = name + } +} + +func withCustomCwd(name string) func(*testlib.DeployTestRun) { + return func(d *testlib.DeployTestRun) { + d.Cwd = name + } +} + func withOverwrittenConfig(raw any) func(*testlib.DeployTestRun) { return func(d *testlib.DeployTestRun) { - flyTomlPath := fmt.Sprintf("%s/fly.toml", d.WorkDir()) + flyTomlPath := d.WorkDir() + if d.Cwd != "" { + flyTomlPath = fmt.Sprintf("%s/%s", flyTomlPath, d.Cwd) + } + flyTomlPath = fmt.Sprintf("%s/%s", flyTomlPath, d.FlyTomlPath) data := make(map[string]any) switch cast := raw.(type) { case map[string]any: diff --git a/test/fixtures/deploy-node-custom-config-path/.dockerignore b/test/fixtures/deploy-node-custom-config-path/.dockerignore new file mode 100644 index 0000000000..74340d4b1f --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/.dockerignore @@ -0,0 +1,6 @@ +/.git +/node_modules +.dockerignore +.env +Dockerfile +fly.toml diff --git a/test/fixtures/deploy-node-custom-config-path/Dockerfile b/test/fixtures/deploy-node-custom-config-path/Dockerfile new file mode 100644 index 0000000000..d86daec5ba --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/Dockerfile @@ -0,0 +1,39 @@ +# syntax = docker/dockerfile:1 + +# Adjust NODE_VERSION as desired +ARG NODE_VERSION=21.6.2 +FROM node:${NODE_VERSION}-slim as base + +LABEL fly_launch_runtime="Node.js" + +# Node.js app lives here +WORKDIR /app + +# Set production environment +ENV NODE_ENV="production" + + +# Throw-away build stage to reduce size of final image +FROM base as build + +# Install packages needed to build node modules +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y build-essential node-gyp pkg-config python-is-python3 + +# Install node modules +COPY --link package-lock.json package.json ./ +RUN npm ci + +# Copy application code +COPY --link . . 
+ + +# Final stage for app image +FROM base + +# Copy built application +COPY --from=build /app /app + +# Start the server by default, this can be overwritten at runtime +EXPOSE 3000 +CMD [ "npm", "run", "start" ] diff --git a/test/fixtures/deploy-node-custom-config-path/custom-fly-config.toml b/test/fixtures/deploy-node-custom-config-path/custom-fly-config.toml new file mode 100644 index 0000000000..77fe4fbb2e --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/custom-fly-config.toml @@ -0,0 +1,31 @@ +app = "{{apps.0}}" +primary_region = '{{region}}' + +[build] + dockerfile = 'Dockerfile' + +[deploy] + release_command = "sleep 2" + +[env] + TEST_ID = "{{test.id}}" + +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = "stop" + auto_start_machines = true + min_machines_running = 0 + processes = ['app'] + + [[http_service.checks]] + grace_period = "5s" + interval = "20s" + method = "GET" + timeout = "5s" + path = "/" + +[[vm]] + memory = '1gb' + cpu_kind = 'shared' + cpus = 1 diff --git a/test/fixtures/deploy-node-custom-config-path/index.js b/test/fixtures/deploy-node-custom-config-path/index.js new file mode 100644 index 0000000000..998f3f3ad5 --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/index.js @@ -0,0 +1,14 @@ +const http = require('http'); + +http.createServer((request, response) => { + response.writeHead(200, + { + 'Content-Type': 'text/plain' + } + ); + + // prints environment variable value + response.write(`Hello, World! ${process.env["TEST_ID"]}\n`); + response.end(); + +}).listen(8080); diff --git a/test/fixtures/deploy-node-custom-config-path/package-lock.json b/test/fixtures/deploy-node-custom-config-path/package-lock.json new file mode 100644 index 0000000000..44a78d8204 --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/package-lock.json @@ -0,0 +1,12 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "hello-node", + "version": "1.0.0" + } + } +} diff --git a/test/fixtures/deploy-node-custom-config-path/package.json b/test/fixtures/deploy-node-custom-config-path/package.json new file mode 100644 index 0000000000..9a1d0260d5 --- /dev/null +++ b/test/fixtures/deploy-node-custom-config-path/package.json @@ -0,0 +1,8 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "start": "node index.js" + } +} diff --git a/test/fixtures/deploy-node-custom-config-path/somefile b/test/fixtures/deploy-node-custom-config-path/somefile new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/.dockerignore b/test/fixtures/deploy-node-monorepo/inner-repo/.dockerignore new file mode 100644 index 0000000000..74340d4b1f --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/.dockerignore @@ -0,0 +1,6 @@ +/.git +/node_modules +.dockerignore +.env +Dockerfile +fly.toml diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/Dockerfile b/test/fixtures/deploy-node-monorepo/inner-repo/Dockerfile new file mode 100644 index 0000000000..d86daec5ba --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/Dockerfile @@ -0,0 +1,39 @@ +# syntax = docker/dockerfile:1 + +# Adjust NODE_VERSION as desired +ARG NODE_VERSION=21.6.2 +FROM node:${NODE_VERSION}-slim as base + +LABEL fly_launch_runtime="Node.js" + +# Node.js app lives here +WORKDIR /app + +# Set production environment +ENV NODE_ENV="production" + + +# Throw-away build stage to reduce size 
of final image +FROM base as build + +# Install packages needed to build node modules +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y build-essential node-gyp pkg-config python-is-python3 + +# Install node modules +COPY --link package-lock.json package.json ./ +RUN npm ci + +# Copy application code +COPY --link . . + + +# Final stage for app image +FROM base + +# Copy built application +COPY --from=build /app /app + +# Start the server by default, this can be overwritten at runtime +EXPOSE 3000 +CMD [ "npm", "run", "start" ] diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/fly.toml b/test/fixtures/deploy-node-monorepo/inner-repo/fly.toml new file mode 100644 index 0000000000..77fe4fbb2e --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/fly.toml @@ -0,0 +1,31 @@ +app = "{{apps.0}}" +primary_region = '{{region}}' + +[build] + dockerfile = 'Dockerfile' + +[deploy] + release_command = "sleep 2" + +[env] + TEST_ID = "{{test.id}}" + +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = "stop" + auto_start_machines = true + min_machines_running = 0 + processes = ['app'] + + [[http_service.checks]] + grace_period = "5s" + interval = "20s" + method = "GET" + timeout = "5s" + path = "/" + +[[vm]] + memory = '1gb' + cpu_kind = 'shared' + cpus = 1 diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/index.js b/test/fixtures/deploy-node-monorepo/inner-repo/index.js new file mode 100644 index 0000000000..998f3f3ad5 --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/index.js @@ -0,0 +1,14 @@ +const http = require('http'); + +http.createServer((request, response) => { + response.writeHead(200, + { + 'Content-Type': 'text/plain' + } + ); + + // prints environment variable value + response.write(`Hello, World! 
${process.env["TEST_ID"]}\n`); + response.end(); + +}).listen(8080); diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/package-lock.json b/test/fixtures/deploy-node-monorepo/inner-repo/package-lock.json new file mode 100644 index 0000000000..44a78d8204 --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/package-lock.json @@ -0,0 +1,12 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "hello-node", + "version": "1.0.0" + } + } +} diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/package.json b/test/fixtures/deploy-node-monorepo/inner-repo/package.json new file mode 100644 index 0000000000..9a1d0260d5 --- /dev/null +++ b/test/fixtures/deploy-node-monorepo/inner-repo/package.json @@ -0,0 +1,8 @@ +{ + "name": "hello-node", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "start": "node index.js" + } +} diff --git a/test/fixtures/deploy-node-monorepo/inner-repo/somefile b/test/fixtures/deploy-node-monorepo/inner-repo/somefile new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 8f85c564fb..06d86fd9bb 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -68,7 +68,7 @@ func (d *DeployerTestEnv) Close() error { } func (d *DeployerTestEnv) NewRun(options ...func(*DeployTestRun)) *DeployTestRun { - run := &DeployTestRun{FlyctlTestEnv: d.FlyctlTestEnv, dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}, Extra: make(map[string]interface{})} + run := &DeployTestRun{FlyctlTestEnv: d.FlyctlTestEnv, dockerClient: d.dockerClient, deployerImage: d.image, apiToken: d.FlyctlTestEnv.AccessToken(), orgSlug: d.FlyctlTestEnv.OrgSlug(), containerBinds: []string{}, Extra: make(map[string]interface{}), Cwd: "", FlyTomlPath: "fly.toml"} for _, o := range options { o(run) } @@ -114,6 +114,9 @@ type DeployTestRun struct { err error Extra map[string]interface{} + + Cwd string + FlyTomlPath string } func WithApp(app string) func(*DeployTestRun) { @@ -213,6 +216,12 @@ func (d *DeployTestRun) Start(ctx context.Context) error { if d.deployNow { env = append(env, "DEPLOY_NOW=1") } + if d.FlyTomlPath != "fly.toml" { + env = append(env, fmt.Sprintf("DEPLOYER_FLY_CONFIG_PATH=%s", d.FlyTomlPath)) + } + if d.Cwd != "" { + env = append(env, fmt.Sprintf("DEPLOYER_SOURCE_CWD=%s", d.Cwd)) + } if d.createAndPushBranch { env = append(env, "DEPLOY_CREATE_AND_PUSH_BRANCH=1") From 79b10031609f18e0b076ade94232731884fe736b Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 11 Oct 2024 12:27:26 -0400 Subject: [PATCH 067/104] add rails 8 test, fix rvm install and usage, better ruby version detection when using rails --- deploy.rb | 1 + deploy/common.rb | 2 +- deployer.Dockerfile | 22 +- scanner/rails.go | 41 +- scanner/ruby.go | 28 +- scanner/ruby_test.go | 21 ++ test/deployer/deployer_test.go | 22 ++ test/fixtures/deploy-rails-8/.dockerignore | 47 +++ test/fixtures/deploy-rails-8/.gitattributes | 9 + .../deploy-rails-8/.github/dependabot.yml | 12 + .../deploy-rails-8/.github/workflows/ci.yml | 90 +++++ test/fixtures/deploy-rails-8/.gitignore | 34 ++ .../.kamal/hooks/docker-setup.sample | 3 + .../.kamal/hooks/post-deploy.sample | 14 + .../.kamal/hooks/post-proxy-reboot.sample | 3 + .../.kamal/hooks/pre-build.sample | 51 +++ .../.kamal/hooks/pre-connect.sample | 47 +++ .../.kamal/hooks/pre-deploy.sample | 109 ++++++ 
.../.kamal/hooks/pre-proxy-reboot.sample | 3 + test/fixtures/deploy-rails-8/.kamal/secrets | 17 + test/fixtures/deploy-rails-8/.rubocop.yml | 8 + test/fixtures/deploy-rails-8/.ruby-version | 1 + test/fixtures/deploy-rails-8/Dockerfile | 72 ++++ test/fixtures/deploy-rails-8/Gemfile | 63 ++++ test/fixtures/deploy-rails-8/Gemfile.lock | 349 ++++++++++++++++++ test/fixtures/deploy-rails-8/README.md | 24 ++ test/fixtures/deploy-rails-8/Rakefile | 6 + .../deploy-rails-8/app/assets/images/.keep | 0 .../app/assets/stylesheets/application.css | 10 + .../app/controllers/application_controller.rb | 4 + .../app/controllers/concerns/.keep | 0 .../app/helpers/application_helper.rb | 2 + .../app/jobs/application_job.rb | 7 + .../app/mailers/application_mailer.rb | 4 + .../app/models/application_record.rb | 3 + .../deploy-rails-8/app/models/concerns/.keep | 0 .../app/views/layouts/application.html.erb | 27 ++ .../app/views/layouts/mailer.html.erb | 13 + .../app/views/layouts/mailer.text.erb | 1 + .../app/views/pwa/manifest.json.erb | 22 ++ .../app/views/pwa/service-worker.js | 26 ++ test/fixtures/deploy-rails-8/bin/brakeman | 7 + test/fixtures/deploy-rails-8/bin/bundle | 109 ++++++ test/fixtures/deploy-rails-8/bin/dev | 2 + .../deploy-rails-8/bin/docker-entrypoint | 14 + test/fixtures/deploy-rails-8/bin/kamal | 27 ++ test/fixtures/deploy-rails-8/bin/rails | 4 + test/fixtures/deploy-rails-8/bin/rake | 4 + test/fixtures/deploy-rails-8/bin/rubocop | 8 + test/fixtures/deploy-rails-8/bin/setup | 35 ++ test/fixtures/deploy-rails-8/bin/thrust | 5 + test/fixtures/deploy-rails-8/config.ru | 6 + .../deploy-rails-8/config/application.rb | 27 ++ test/fixtures/deploy-rails-8/config/boot.rb | 4 + test/fixtures/deploy-rails-8/config/cable.yml | 10 + .../deploy-rails-8/config/credentials.yml.enc | 1 + .../deploy-rails-8/config/database.yml | 41 ++ .../fixtures/deploy-rails-8/config/deploy.yml | 112 ++++++ .../deploy-rails-8/config/environment.rb | 5 + .../config/environments/development.rb | 72 ++++ .../config/environments/production.rb | 89 +++++ .../config/environments/test.rb | 53 +++ .../config/initializers/assets.rb | 7 + .../initializers/content_security_policy.rb | 25 ++ .../initializers/filter_parameter_logging.rb | 8 + .../config/initializers/inflections.rb | 16 + .../deploy-rails-8/config/locales/en.yml | 31 ++ test/fixtures/deploy-rails-8/config/puma.rb | 41 ++ test/fixtures/deploy-rails-8/config/routes.rb | 14 + .../deploy-rails-8/config/storage.yml | 34 ++ test/fixtures/deploy-rails-8/db/seeds.rb | 9 + test/fixtures/deploy-rails-8/lib/tasks/.keep | 0 test/fixtures/deploy-rails-8/log/.keep | 0 test/fixtures/deploy-rails-8/public/400.html | 114 ++++++ test/fixtures/deploy-rails-8/public/404.html | 114 ++++++ .../public/406-unsupported-browser.html | 114 ++++++ test/fixtures/deploy-rails-8/public/422.html | 114 ++++++ test/fixtures/deploy-rails-8/public/500.html | 114 ++++++ test/fixtures/deploy-rails-8/public/icon.png | Bin 0 -> 4166 bytes test/fixtures/deploy-rails-8/public/icon.svg | 3 + .../fixtures/deploy-rails-8/public/robots.txt | 1 + test/fixtures/deploy-rails-8/script/.keep | 0 test/fixtures/deploy-rails-8/storage/.keep | 0 .../test/application_system_test_case.rb | 5 + .../deploy-rails-8/test/controllers/.keep | 0 .../deploy-rails-8/test/fixtures/files/.keep | 0 .../deploy-rails-8/test/helpers/.keep | 0 .../deploy-rails-8/test/integration/.keep | 0 .../deploy-rails-8/test/mailers/.keep | 0 .../fixtures/deploy-rails-8/test/models/.keep | 0 .../fixtures/deploy-rails-8/test/system/.keep | 0 
.../deploy-rails-8/test/test_helper.rb | 15 + test/fixtures/deploy-rails-8/vendor/.keep | 0 93 files changed, 2577 insertions(+), 55 deletions(-) create mode 100644 scanner/ruby_test.go create mode 100644 test/fixtures/deploy-rails-8/.dockerignore create mode 100644 test/fixtures/deploy-rails-8/.gitattributes create mode 100644 test/fixtures/deploy-rails-8/.github/dependabot.yml create mode 100644 test/fixtures/deploy-rails-8/.github/workflows/ci.yml create mode 100644 test/fixtures/deploy-rails-8/.gitignore create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/docker-setup.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/post-deploy.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/post-proxy-reboot.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/pre-build.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/pre-connect.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/pre-deploy.sample create mode 100755 test/fixtures/deploy-rails-8/.kamal/hooks/pre-proxy-reboot.sample create mode 100644 test/fixtures/deploy-rails-8/.kamal/secrets create mode 100644 test/fixtures/deploy-rails-8/.rubocop.yml create mode 100644 test/fixtures/deploy-rails-8/.ruby-version create mode 100644 test/fixtures/deploy-rails-8/Dockerfile create mode 100644 test/fixtures/deploy-rails-8/Gemfile create mode 100644 test/fixtures/deploy-rails-8/Gemfile.lock create mode 100644 test/fixtures/deploy-rails-8/README.md create mode 100644 test/fixtures/deploy-rails-8/Rakefile create mode 100644 test/fixtures/deploy-rails-8/app/assets/images/.keep create mode 100644 test/fixtures/deploy-rails-8/app/assets/stylesheets/application.css create mode 100644 test/fixtures/deploy-rails-8/app/controllers/application_controller.rb create mode 100644 test/fixtures/deploy-rails-8/app/controllers/concerns/.keep create mode 100644 test/fixtures/deploy-rails-8/app/helpers/application_helper.rb create mode 100644 test/fixtures/deploy-rails-8/app/jobs/application_job.rb create mode 100644 test/fixtures/deploy-rails-8/app/mailers/application_mailer.rb create mode 100644 test/fixtures/deploy-rails-8/app/models/application_record.rb create mode 100644 test/fixtures/deploy-rails-8/app/models/concerns/.keep create mode 100644 test/fixtures/deploy-rails-8/app/views/layouts/application.html.erb create mode 100644 test/fixtures/deploy-rails-8/app/views/layouts/mailer.html.erb create mode 100644 test/fixtures/deploy-rails-8/app/views/layouts/mailer.text.erb create mode 100644 test/fixtures/deploy-rails-8/app/views/pwa/manifest.json.erb create mode 100644 test/fixtures/deploy-rails-8/app/views/pwa/service-worker.js create mode 100755 test/fixtures/deploy-rails-8/bin/brakeman create mode 100755 test/fixtures/deploy-rails-8/bin/bundle create mode 100755 test/fixtures/deploy-rails-8/bin/dev create mode 100755 test/fixtures/deploy-rails-8/bin/docker-entrypoint create mode 100755 test/fixtures/deploy-rails-8/bin/kamal create mode 100755 test/fixtures/deploy-rails-8/bin/rails create mode 100755 test/fixtures/deploy-rails-8/bin/rake create mode 100755 test/fixtures/deploy-rails-8/bin/rubocop create mode 100755 test/fixtures/deploy-rails-8/bin/setup create mode 100755 test/fixtures/deploy-rails-8/bin/thrust create mode 100644 test/fixtures/deploy-rails-8/config.ru create mode 100644 test/fixtures/deploy-rails-8/config/application.rb create mode 100644 test/fixtures/deploy-rails-8/config/boot.rb create mode 100644 test/fixtures/deploy-rails-8/config/cable.yml create 
mode 100644 test/fixtures/deploy-rails-8/config/credentials.yml.enc create mode 100644 test/fixtures/deploy-rails-8/config/database.yml create mode 100644 test/fixtures/deploy-rails-8/config/deploy.yml create mode 100644 test/fixtures/deploy-rails-8/config/environment.rb create mode 100644 test/fixtures/deploy-rails-8/config/environments/development.rb create mode 100644 test/fixtures/deploy-rails-8/config/environments/production.rb create mode 100644 test/fixtures/deploy-rails-8/config/environments/test.rb create mode 100644 test/fixtures/deploy-rails-8/config/initializers/assets.rb create mode 100644 test/fixtures/deploy-rails-8/config/initializers/content_security_policy.rb create mode 100644 test/fixtures/deploy-rails-8/config/initializers/filter_parameter_logging.rb create mode 100644 test/fixtures/deploy-rails-8/config/initializers/inflections.rb create mode 100644 test/fixtures/deploy-rails-8/config/locales/en.yml create mode 100644 test/fixtures/deploy-rails-8/config/puma.rb create mode 100644 test/fixtures/deploy-rails-8/config/routes.rb create mode 100644 test/fixtures/deploy-rails-8/config/storage.yml create mode 100644 test/fixtures/deploy-rails-8/db/seeds.rb create mode 100644 test/fixtures/deploy-rails-8/lib/tasks/.keep create mode 100644 test/fixtures/deploy-rails-8/log/.keep create mode 100644 test/fixtures/deploy-rails-8/public/400.html create mode 100644 test/fixtures/deploy-rails-8/public/404.html create mode 100644 test/fixtures/deploy-rails-8/public/406-unsupported-browser.html create mode 100644 test/fixtures/deploy-rails-8/public/422.html create mode 100644 test/fixtures/deploy-rails-8/public/500.html create mode 100644 test/fixtures/deploy-rails-8/public/icon.png create mode 100644 test/fixtures/deploy-rails-8/public/icon.svg create mode 100644 test/fixtures/deploy-rails-8/public/robots.txt create mode 100644 test/fixtures/deploy-rails-8/script/.keep create mode 100644 test/fixtures/deploy-rails-8/storage/.keep create mode 100644 test/fixtures/deploy-rails-8/test/application_system_test_case.rb create mode 100644 test/fixtures/deploy-rails-8/test/controllers/.keep create mode 100644 test/fixtures/deploy-rails-8/test/fixtures/files/.keep create mode 100644 test/fixtures/deploy-rails-8/test/helpers/.keep create mode 100644 test/fixtures/deploy-rails-8/test/integration/.keep create mode 100644 test/fixtures/deploy-rails-8/test/mailers/.keep create mode 100644 test/fixtures/deploy-rails-8/test/models/.keep create mode 100644 test/fixtures/deploy-rails-8/test/system/.keep create mode 100644 test/fixtures/deploy-rails-8/test/test_helper.rb create mode 100644 test/fixtures/deploy-rails-8/vendor/.keep diff --git a/deploy.rb b/deploy.rb index 0f7ea8962c..10b1567f03 100755 --- a/deploy.rb +++ b/deploy.rb @@ -203,6 +203,7 @@ case RUNTIME_LANGUAGE when "ruby" exec_capture("rvm install #{version}") + exec_capture("rvm use #{version} --default") when "php" major, minor = Gem::Version.new(version).segments php_version = "#{major}.#{minor}" diff --git a/deploy/common.rb b/deploy/common.rb index 5ed9a03f41..ec2d57c8dd 100644 --- a/deploy/common.rb +++ b/deploy/common.rb @@ -100,7 +100,7 @@ def exec_capture(cmd, display: nil, log: true) out_mutex = Mutex.new output = "" - status = Open3.popen3(cmd) do |stdin, stdout, stderr, wait_thr| + status = Open3.popen3("/bin/bash", "-lc", cmd) do |stdin, stdout, stderr, wait_thr| pid = wait_thr.pid stdin.close_write diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 4b3e35eff2..3fe58edb91 100644 --- a/deployer.Dockerfile +++ 
b/deployer.Dockerfile @@ -3,9 +3,9 @@ FROM ubuntu:20.04 ENV DEBIAN_FRONTEND=noninteractive RUN apt update && \ - apt install -y --no-install-recommends software-properties-common && \ - apt-add-repository -y ppa:rael-gc/rvm && apt-add-repository -y ppa:ondrej/php && apt update && \ - apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev rvm build-essential libxml2 libpq-dev libyaml-dev procps gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool pkg-config sqlite3 zlib1g-dev libreadline6-dev locales mlocate + apt install -y software-properties-common && \ + apt-add-repository -y ppa:ondrej/php && apt update && \ + apt install -y --no-install-recommends ca-certificates git curl clang g++ make unzip locales openssl libssl-dev build-essential libxml2 libpq-dev libyaml-dev procps gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool pkg-config sqlite3 zlib1g-dev libreadline6-dev locales mlocate SHELL ["/bin/bash", "-lc"] @@ -30,11 +30,21 @@ ENV DEFAULT_RUBY_VERSION=3.1.6 \ ARG NODE_BUILD_VERSION=5.3.8 # install a ruby to run the initial script -RUN /bin/bash -lc 'rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAULT_RUBY_VERSION && gem update --system && gem install bundler' +# RUN echo 'source "/etc/profile.d/rvm.sh"' >> ~/.bashrc && \ +# usermod -a -G rvm root && \ +# rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAULT_RUBY_VERSION && gem update --system && gem install bundler + +RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB && \ + curl -sSL https://get.rvm.io | bash -s stable && \ + usermod -a -G rvm root && \ + source /etc/profile.d/rvm.sh && \ + rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAULT_RUBY_VERSION && gem update --system && gem install bundler && \ + echo -e "\nsource /etc/profile.d/rvm.sh" >> ~/.bash_profile && \ + echo -e "\nrvm use default &> /dev/null" >> ~/.bash_profile # install mise RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ - echo -e "\n\nexport PATH=\"$HOME/.local/bin:$HOME/.local/share/mise/shims:$PATH\"" >> ~/.profile + echo -e "\n\nexport PATH=\"$PATH:$HOME/.local/bin:$HOME/.local/share/mise/shims\"" >> ~/.profile ENV MISE_PYTHON_COMPILE=false @@ -65,5 +75,5 @@ COPY deploy /deploy RUN mkdir -p /usr/src/app # need a login shell for rvm to work properly... 
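+# "--login" makes bash read ~/.bash_profile (which sources /etc/profile.d/rvm.sh above),
+# so the rvm-selected default ruby is on PATH when deploy.rb runs.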
-ENTRYPOINT ["/bin/bash", "-lc"] +ENTRYPOINT ["/bin/bash", "--login", "-c"] CMD ["/deploy.rb"] \ No newline at end of file diff --git a/scanner/rails.go b/scanner/rails.go index cd843d7726..4655a87030 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -70,40 +70,7 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error // add ruby version - var rubyVersion string - - // add ruby version from .ruby-version file - versionFile, err := os.ReadFile(".ruby-version") - if err == nil { - re := regexp.MustCompile(`ruby-(\d+\.\d+\.\d+)`) - matches := re.FindStringSubmatch(string(versionFile)) - if len(matches) >= 2 { - rubyVersion = matches[1] - } - } - - if rubyVersion == "" { - // add ruby version from Gemfile - gemfile, err := os.ReadFile("Gemfile") - if err == nil { - re := regexp.MustCompile(`(?m)^ruby\s+["'](\d+\.\d+\.\d+)["']`) - matches := re.FindStringSubmatch(string(gemfile)) - if len(matches) >= 2 { - rubyVersion = matches[1] - } - } - } - - if rubyVersion == "" { - versionOutput, err := exec.Command("ruby", "--version").Output() - if err == nil { - re := regexp.MustCompile(`ruby (\d+\.\d+\.\d+)`) - matches := re.FindStringSubmatch(string(versionOutput)) - if len(matches) >= 2 { - rubyVersion = matches[1] - } - } - } + rubyVersion, _ := extractRubyVersion("Gemfile.lock", "Gemfile", ".ruby-version") if rubyVersion != "" { s.Runtime = plan.RuntimeStruct{Language: "ruby", Version: rubyVersion} @@ -130,11 +97,11 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error // enable redis if there are any action cable / anycable channels redis := false files, err := filepath.Glob("app/channels/*.rb") - if err == nil && len(files) > 0 { - redis = true + if err == nil { + redis = len(files) > 0 } - if redis == false { + if !redis { files, err = filepath.Glob("app/views/*") if err == nil && len(files) > 0 { for _, file := range files { diff --git a/scanner/ruby.go b/scanner/ruby.go index de2d11d885..d0a68a64e8 100644 --- a/scanner/ruby.go +++ b/scanner/ruby.go @@ -55,6 +55,7 @@ func extractRubyVersion(lockfilePath string, gemfilePath string, rubyVersionPath for i, name := range re.SubexpNames() { if len(m) > 0 && name == "version" { version = m[i] + break } } } @@ -66,14 +67,7 @@ func extractRubyVersion(lockfilePath string, gemfilePath string, rubyVersionPath return "", err } - re := regexp.MustCompile(`ruby \"(?P[\d.]+)\"`) - m := re.FindStringSubmatch(string(gemfileContents)) - - for i, name := range re.SubexpNames() { - if len(m) > 0 && name == "version" { - version = m[i] - } - } + version = extractGemfileRuby(gemfileContents) } if version == "" { @@ -84,13 +78,11 @@ func extractRubyVersion(lockfilePath string, gemfilePath string, rubyVersionPath return "", err } - version = string(versionString) + version = strings.TrimSpace(string(versionString)) } } if version == "" { - version = "3.3.5" - out, err := exec.Command("ruby", "-v").Output() if err == nil { @@ -101,6 +93,7 @@ func extractRubyVersion(lockfilePath string, gemfilePath string, rubyVersionPath for i, name := range re.SubexpNames() { if len(m) > 0 && name == "version" { version = m[i] + break } } } @@ -108,3 +101,16 @@ func extractRubyVersion(lockfilePath string, gemfilePath string, rubyVersionPath return version, nil } + +func extractGemfileRuby(contents []byte) string { + re := regexp.MustCompile(`ruby ["'](?P[\d.]+)["']`) + m := re.FindStringSubmatch(string(contents)) + + for i, name := range re.SubexpNames() { + if len(m) > 0 && name == "version" { + return m[i] + } + } + + 
return "" +} diff --git a/scanner/ruby_test.go b/scanner/ruby_test.go new file mode 100644 index 0000000000..760b335899 --- /dev/null +++ b/scanner/ruby_test.go @@ -0,0 +1,21 @@ +package scanner + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRubyVersionParsing(t *testing.T) { + v := extractGemfileRuby([]byte(` + source "https://rubygems.org" + + ruby '3.1.0' + `)) + + require.Equal(t, v, "3.1.0") + + v = extractGemfileRuby([]byte(`ruby "3.1.0"`)) + + require.Equal(t, v, "3.1.0") +} diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 48f1a98233..d4776442bf 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -157,6 +157,28 @@ func TestLaunchGoFromRepo(t *testing.T) { require.Contains(t, string(body), "I'm running in the yyz region") } +func TestLaunchRails8(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-rails-8"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, manifest.Plan.Runtime.Language, "ruby") + + appName := deploy.Extra["appName"].(string) + + _, err = testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/up", appName)) + require.NoError(t, err) +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/deploy-rails-8/.dockerignore b/test/fixtures/deploy-rails-8/.dockerignore new file mode 100644 index 0000000000..75405937b6 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.dockerignore @@ -0,0 +1,47 @@ +# See https://docs.docker.com/engine/reference/builder/#dockerignore-file for more about ignoring files. + +# Ignore git directory. +/.git/ +/.gitignore + +# Ignore bundler config. +/.bundle + +# Ignore all environment files. +/.env* + +# Ignore all default key files. +/config/master.key +/config/credentials/*.key + +# Ignore all logfiles and tempfiles. +/log/* +/tmp/* +!/log/.keep +!/tmp/.keep + +# Ignore pidfiles, but keep the directory. +/tmp/pids/* +!/tmp/pids/.keep + +# Ignore storage (uploaded files in development and any SQLite databases). +/storage/* +!/storage/.keep +/tmp/storage/* +!/tmp/storage/.keep + +# Ignore assets. +/node_modules/ +/app/assets/builds/* +!/app/assets/builds/.keep +/public/assets + +# Ignore CI service files. +/.github + +# Ignore development files +/.devcontainer + +# Ignore Docker-related files +/.dockerignore +/Dockerfile* diff --git a/test/fixtures/deploy-rails-8/.gitattributes b/test/fixtures/deploy-rails-8/.gitattributes new file mode 100644 index 0000000000..8dc4323435 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.gitattributes @@ -0,0 +1,9 @@ +# See https://git-scm.com/docs/gitattributes for more about git attribute files. + +# Mark the database schema as having been generated. +db/schema.rb linguist-generated + +# Mark any vendored files as having been vendored. 
+vendor/* linguist-vendored +config/credentials/*.yml.enc diff=rails_credentials +config/credentials.yml.enc diff=rails_credentials diff --git a/test/fixtures/deploy-rails-8/.github/dependabot.yml b/test/fixtures/deploy-rails-8/.github/dependabot.yml new file mode 100644 index 0000000000..f0527e6be1 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: +- package-ecosystem: bundler + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 +- package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 diff --git a/test/fixtures/deploy-rails-8/.github/workflows/ci.yml b/test/fixtures/deploy-rails-8/.github/workflows/ci.yml new file mode 100644 index 0000000000..00af91f692 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.github/workflows/ci.yml @@ -0,0 +1,90 @@ +name: CI + +on: + pull_request: + push: + branches: [ main ] + +jobs: + scan_ruby: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Scan for common Rails security vulnerabilities using static analysis + run: bin/brakeman --no-pager + + scan_js: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Scan for security vulnerabilities in JavaScript dependencies + run: bin/importmap audit + + lint: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Lint code for consistent style + run: bin/rubocop -f github + + test: + runs-on: ubuntu-latest + + # services: + # redis: + # image: redis + # ports: + # - 6379:6379 + # options: --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 + steps: + - name: Install packages + run: sudo apt-get update && sudo apt-get install --no-install-recommends -y google-chrome-stable curl libjemalloc2 libvips sqlite3 + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Run tests + env: + RAILS_ENV: test + # REDIS_URL: redis://localhost:6379/0 + run: bin/rails db:test:prepare test test:system + + - name: Keep screenshots from failed system tests + uses: actions/upload-artifact@v4 + if: failure() + with: + name: screenshots + path: ${{ github.workspace }}/tmp/screenshots + if-no-files-found: ignore diff --git a/test/fixtures/deploy-rails-8/.gitignore b/test/fixtures/deploy-rails-8/.gitignore new file mode 100644 index 0000000000..f92525ca5e --- /dev/null +++ b/test/fixtures/deploy-rails-8/.gitignore @@ -0,0 +1,34 @@ +# See https://help.github.com/articles/ignoring-files for more about ignoring files. +# +# Temporary files generated by your text editor or operating system +# belong in git's global ignore instead: +# `$XDG_CONFIG_HOME/git/ignore` or `~/.config/git/ignore` + +# Ignore bundler config. +/.bundle + +# Ignore all environment files. +/.env* + +# Ignore all logfiles and tempfiles. +/log/* +/tmp/* +!/log/.keep +!/tmp/.keep + +# Ignore pidfiles, but keep the directory. 
+/tmp/pids/* +!/tmp/pids/ +!/tmp/pids/.keep + +# Ignore storage (uploaded files in development and any SQLite databases). +/storage/* +!/storage/.keep +/tmp/storage/* +!/tmp/storage/ +!/tmp/storage/.keep + +/public/assets + +# Ignore master key for decrypting credentials and more. +/config/master.key diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/docker-setup.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/docker-setup.sample new file mode 100755 index 0000000000..2fb07d7d7a --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/docker-setup.sample @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Docker set up on $KAMAL_HOSTS..." diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/post-deploy.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/post-deploy.sample new file mode 100755 index 0000000000..75efafc10e --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/post-deploy.sample @@ -0,0 +1,14 @@ +#!/bin/sh + +# A sample post-deploy hook +# +# These environment variables are available: +# KAMAL_RECORDED_AT +# KAMAL_PERFORMER +# KAMAL_VERSION +# KAMAL_HOSTS +# KAMAL_ROLE (if set) +# KAMAL_DESTINATION (if set) +# KAMAL_RUNTIME + +echo "$KAMAL_PERFORMER deployed $KAMAL_VERSION to $KAMAL_DESTINATION in $KAMAL_RUNTIME seconds" diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/post-proxy-reboot.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/post-proxy-reboot.sample new file mode 100755 index 0000000000..1435a677f2 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/post-proxy-reboot.sample @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Rebooted kamal-proxy on $KAMAL_HOSTS" diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/pre-build.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-build.sample new file mode 100755 index 0000000000..f87d81130b --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-build.sample @@ -0,0 +1,51 @@ +#!/bin/sh + +# A sample pre-build hook +# +# Checks: +# 1. We have a clean checkout +# 2. A remote is configured +# 3. The branch has been pushed to the remote +# 4. The version we are deploying matches the remote +# +# These environment variables are available: +# KAMAL_RECORDED_AT +# KAMAL_PERFORMER +# KAMAL_VERSION +# KAMAL_HOSTS +# KAMAL_ROLE (if set) +# KAMAL_DESTINATION (if set) + +if [ -n "$(git status --porcelain)" ]; then + echo "Git checkout is not clean, aborting..." >&2 + git status --porcelain >&2 + exit 1 +fi + +first_remote=$(git remote) + +if [ -z "$first_remote" ]; then + echo "No git remote set, aborting..." >&2 + exit 1 +fi + +current_branch=$(git branch --show-current) + +if [ -z "$current_branch" ]; then + echo "Not on a git branch, aborting..." >&2 + exit 1 +fi + +remote_head=$(git ls-remote $first_remote --tags $current_branch | cut -f1) + +if [ -z "$remote_head" ]; then + echo "Branch not pushed to remote, aborting..." >&2 + exit 1 +fi + +if [ "$KAMAL_VERSION" != "$remote_head" ]; then + echo "Version ($KAMAL_VERSION) does not match remote HEAD ($remote_head), aborting..." 
>&2 + exit 1 +fi + +exit 0 diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/pre-connect.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-connect.sample new file mode 100755 index 0000000000..18e61d7e5a --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-connect.sample @@ -0,0 +1,47 @@ +#!/usr/bin/env ruby + +# A sample pre-connect check +# +# Warms DNS before connecting to hosts in parallel +# +# These environment variables are available: +# KAMAL_RECORDED_AT +# KAMAL_PERFORMER +# KAMAL_VERSION +# KAMAL_HOSTS +# KAMAL_ROLE (if set) +# KAMAL_DESTINATION (if set) +# KAMAL_RUNTIME + +hosts = ENV["KAMAL_HOSTS"].split(",") +results = nil +max = 3 + +elapsed = Benchmark.realtime do + results = hosts.map do |host| + Thread.new do + tries = 1 + + begin + Socket.getaddrinfo(host, 0, Socket::AF_UNSPEC, Socket::SOCK_STREAM, nil, Socket::AI_CANONNAME) + rescue SocketError + if tries < max + puts "Retrying DNS warmup: #{host}" + tries += 1 + sleep rand + retry + else + puts "DNS warmup failed: #{host}" + host + end + end + + tries + end + end.map(&:value) +end + +retries = results.sum - hosts.size +nopes = results.count { |r| r == max } + +puts "Prewarmed %d DNS lookups in %.2f sec: %d retries, %d failures" % [ hosts.size, elapsed, retries, nopes ] diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/pre-deploy.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-deploy.sample new file mode 100755 index 0000000000..1b280c719e --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-deploy.sample @@ -0,0 +1,109 @@ +#!/usr/bin/env ruby + +# A sample pre-deploy hook +# +# Checks the Github status of the build, waiting for a pending build to complete for up to 720 seconds. +# +# Fails unless the combined status is "success" +# +# These environment variables are available: +# KAMAL_RECORDED_AT +# KAMAL_PERFORMER +# KAMAL_VERSION +# KAMAL_HOSTS +# KAMAL_COMMAND +# KAMAL_SUBCOMMAND +# KAMAL_ROLE (if set) +# KAMAL_DESTINATION (if set) + +# Only check the build status for production deployments +if ENV["KAMAL_COMMAND"] == "rollback" || ENV["KAMAL_DESTINATION"] != "production" + exit 0 +end + +require "bundler/inline" + +# true = install gems so this is fast on repeat invocations +gemfile(true, quiet: true) do + source "https://rubygems.org" + + gem "octokit" + gem "faraday-retry" +end + +MAX_ATTEMPTS = 72 +ATTEMPTS_GAP = 10 + +def exit_with_error(message) + $stderr.puts message + exit 1 +end + +class GithubStatusChecks + attr_reader :remote_url, :git_sha, :github_client, :combined_status + + def initialize + @remote_url = `git config --get remote.origin.url`.strip.delete_prefix("https://github.com/") + @git_sha = `git rev-parse HEAD`.strip + @github_client = Octokit::Client.new(access_token: ENV["GITHUB_TOKEN"]) + refresh! + end + + def refresh! + @combined_status = github_client.combined_status(remote_url, git_sha) + end + + def state + combined_status[:state] + end + + def first_status_url + first_status = combined_status[:statuses].find { |status| status[:state] == state } + first_status && first_status[:target_url] + end + + def complete_count + combined_status[:statuses].count { |status| status[:state] != "pending"} + end + + def total_count + combined_status[:statuses].count + end + + def current_status + if total_count > 0 + "Completed #{complete_count}/#{total_count} checks, see #{first_status_url} ..." + else + "Build not started..." + end + end +end + + +$stdout.sync = true + +puts "Checking build status..." 
+attempts = 0 +checks = GithubStatusChecks.new + +begin + loop do + case checks.state + when "success" + puts "Checks passed, see #{checks.first_status_url}" + exit 0 + when "failure" + exit_with_error "Checks failed, see #{checks.first_status_url}" + when "pending" + attempts += 1 + end + + exit_with_error "Checks are still pending, gave up after #{MAX_ATTEMPTS * ATTEMPTS_GAP} seconds" if attempts == MAX_ATTEMPTS + + puts checks.current_status + sleep(ATTEMPTS_GAP) + checks.refresh! + end +rescue Octokit::NotFound + exit_with_error "Build status could not be found" +end diff --git a/test/fixtures/deploy-rails-8/.kamal/hooks/pre-proxy-reboot.sample b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-proxy-reboot.sample new file mode 100755 index 0000000000..061f8059e6 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/hooks/pre-proxy-reboot.sample @@ -0,0 +1,3 @@ +#!/bin/sh + +echo "Rebooting kamal-proxy on $KAMAL_HOSTS..." diff --git a/test/fixtures/deploy-rails-8/.kamal/secrets b/test/fixtures/deploy-rails-8/.kamal/secrets new file mode 100644 index 0000000000..9a771a3985 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.kamal/secrets @@ -0,0 +1,17 @@ +# Secrets defined here are available for reference under registry/password, env/secret, builder/secrets, +# and accessories/*/env/secret in config/deploy.yml. All secrets should be pulled from either +# password manager, ENV, or a file. DO NOT ENTER RAW CREDENTIALS HERE! This file needs to be safe for git. + +# Example of extracting secrets from 1password (or another compatible pw manager) +# SECRETS=$(kamal secrets fetch --adapter 1password --account your-account --from Vault/Item KAMAL_REGISTRY_PASSWORD RAILS_MASTER_KEY) +# KAMAL_REGISTRY_PASSWORD=$(kamal secrets extract KAMAL_REGISTRY_PASSWORD ${SECRETS}) +# RAILS_MASTER_KEY=$(kamal secrets extract RAILS_MASTER_KEY ${SECRETS}) + +# Use a GITHUB_TOKEN if private repositories are needed for the image +# GITHUB_TOKEN=$(gh config get -h github.com oauth_token) + +# Grab the registry password from ENV +KAMAL_REGISTRY_PASSWORD=$KAMAL_REGISTRY_PASSWORD + +# Improve security by using a password manager. Never check config/master.key into git! +RAILS_MASTER_KEY=$(cat config/master.key) diff --git a/test/fixtures/deploy-rails-8/.rubocop.yml b/test/fixtures/deploy-rails-8/.rubocop.yml new file mode 100644 index 0000000000..f9d86d4a54 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.rubocop.yml @@ -0,0 +1,8 @@ +# Omakase Ruby styling for Rails +inherit_gem: { rubocop-rails-omakase: rubocop.yml } + +# Overwrite or add rules to create your own house style +# +# # Use `[a, [b, c]]` not `[ a, [ b, c ] ]` +# Layout/SpaceInsideArrayLiteralBrackets: +# Enabled: false diff --git a/test/fixtures/deploy-rails-8/.ruby-version b/test/fixtures/deploy-rails-8/.ruby-version new file mode 100644 index 0000000000..ab96aa90d1 --- /dev/null +++ b/test/fixtures/deploy-rails-8/.ruby-version @@ -0,0 +1 @@ +ruby-3.2.3 diff --git a/test/fixtures/deploy-rails-8/Dockerfile b/test/fixtures/deploy-rails-8/Dockerfile new file mode 100644 index 0000000000..b08e2aa69a --- /dev/null +++ b/test/fixtures/deploy-rails-8/Dockerfile @@ -0,0 +1,72 @@ +# syntax=docker/dockerfile:1 +# check=error=true + +# This Dockerfile is designed for production, not development. Use with Kamal or build'n'run by hand: +# docker build -t helkp . 
+# docker run -d -p 80:80 -e RAILS_MASTER_KEY= --name helkp helkp + +# For a containerized dev environment, see Dev Containers: https://guides.rubyonrails.org/getting_started_with_devcontainer.html + +# Make sure RUBY_VERSION matches the Ruby version in .ruby-version +ARG RUBY_VERSION=3.2.3 +FROM docker.io/library/ruby:$RUBY_VERSION-slim AS base + +# Rails app lives here +WORKDIR /rails + +# Install base packages +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y curl libjemalloc2 libvips sqlite3 && \ + rm -rf /var/lib/apt/lists /var/cache/apt/archives + +# Set production environment +ENV RAILS_ENV="production" \ + BUNDLE_DEPLOYMENT="1" \ + BUNDLE_PATH="/usr/local/bundle" \ + BUNDLE_WITHOUT="development" + +# Throw-away build stage to reduce size of final image +FROM base AS build + +# Install packages needed to build gems +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y build-essential git pkg-config && \ + rm -rf /var/lib/apt/lists /var/cache/apt/archives + +# Install application gems +COPY Gemfile Gemfile.lock ./ +RUN bundle install && \ + rm -rf ~/.bundle/ "${BUNDLE_PATH}"/ruby/*/cache "${BUNDLE_PATH}"/ruby/*/bundler/gems/*/.git && \ + bundle exec bootsnap precompile --gemfile + +# Copy application code +COPY . . + +# Precompile bootsnap code for faster boot times +RUN bundle exec bootsnap precompile app/ lib/ + +# Precompiling assets for production without requiring secret RAILS_MASTER_KEY +RUN SECRET_KEY_BASE_DUMMY=1 ./bin/rails assets:precompile + + + + +# Final stage for app image +FROM base + +# Copy built artifacts: gems, application +COPY --from=build "${BUNDLE_PATH}" "${BUNDLE_PATH}" +COPY --from=build /rails /rails + +# Run and own only the runtime files as a non-root user for security +RUN groupadd --system --gid 1000 rails && \ + useradd rails --uid 1000 --gid 1000 --create-home --shell /bin/bash && \ + chown -R rails:rails db log storage tmp +USER 1000:1000 + +# Entrypoint prepares the database. 
+ENTRYPOINT ["/rails/bin/docker-entrypoint"] + +# Start server via Thruster by default, this can be overwritten at runtime +EXPOSE 80 +CMD ["./bin/thrust", "./bin/rails", "server"] diff --git a/test/fixtures/deploy-rails-8/Gemfile b/test/fixtures/deploy-rails-8/Gemfile new file mode 100644 index 0000000000..906ea1e30c --- /dev/null +++ b/test/fixtures/deploy-rails-8/Gemfile @@ -0,0 +1,63 @@ +source "https://rubygems.org" + +# Bundle edge Rails instead: gem "rails", github: "rails/rails", branch: "main" +gem "rails", "~> 8.0.0.beta1" +# The modern asset pipeline for Rails [https://github.com/rails/propshaft] +gem "propshaft" +# Use sqlite3 as the database for Active Record +gem "sqlite3", ">= 2.1" +# Use the Puma web server [https://github.com/puma/puma] +gem "puma", ">= 5.0" +# Use JavaScript with ESM import maps [https://github.com/rails/importmap-rails] +gem "importmap-rails" +# Hotwire's SPA-like page accelerator [https://turbo.hotwired.dev] +gem "turbo-rails" +# Hotwire's modest JavaScript framework [https://stimulus.hotwired.dev] +gem "stimulus-rails" +# Build JSON APIs with ease [https://github.com/rails/jbuilder] +gem "jbuilder" + +# Use Active Model has_secure_password [https://guides.rubyonrails.org/active_model_basics.html#securepassword] +# gem "bcrypt", "~> 3.1.7" + +# Windows does not include zoneinfo files, so bundle the tzinfo-data gem +gem "tzinfo-data", platforms: %i[ windows jruby ] + +# Use the database-backed adapters for Rails.cache, Active Job, and Action Cable +gem "solid_cache" +gem "solid_queue" +gem "solid_cable" + +# Reduces boot times through caching; required in config/boot.rb +gem "bootsnap", require: false + +# Deploy this application anywhere as a Docker container [https://kamal-deploy.org] +gem "kamal", ">= 2.0.0.rc2", require: false + +# Add HTTP asset caching/compression and X-Sendfile acceleration to Puma [https://github.com/basecamp/thruster/] +gem "thruster", require: false + +# Use Active Storage variants [https://guides.rubyonrails.org/active_storage_overview.html#transforming-images] +# gem "image_processing", "~> 1.2" + +group :development, :test do + # See https://guides.rubyonrails.org/debugging_rails_applications.html#debugging-with-the-debug-gem + gem "debug", platforms: %i[ mri windows ], require: "debug/prelude" + + # Static analysis for security vulnerabilities [https://brakemanscanner.org/] + gem "brakeman", require: false + + # Omakase Ruby styling [https://github.com/rails/rubocop-rails-omakase/] + gem "rubocop-rails-omakase", require: false +end + +group :development do + # Use console on exceptions pages [https://github.com/rails/web-console] + gem "web-console" +end + +group :test do + # Use system testing [https://guides.rubyonrails.org/testing.html#system-testing] + gem "capybara" + gem "selenium-webdriver" +end diff --git a/test/fixtures/deploy-rails-8/Gemfile.lock b/test/fixtures/deploy-rails-8/Gemfile.lock new file mode 100644 index 0000000000..a4a8547213 --- /dev/null +++ b/test/fixtures/deploy-rails-8/Gemfile.lock @@ -0,0 +1,349 @@ +GEM + remote: https://rubygems.org/ + specs: + actioncable (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + nio4r (~> 2.0) + websocket-driver (>= 0.6.1) + zeitwerk (~> 2.6) + actionmailbox (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + activejob (= 8.0.0.beta1) + activerecord (= 8.0.0.beta1) + activestorage (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + mail (>= 2.8.0) + actionmailer (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + actionview (= 8.0.0.beta1) + activejob (= 
8.0.0.beta1) + activesupport (= 8.0.0.beta1) + mail (>= 2.8.0) + rails-dom-testing (~> 2.2) + actionpack (8.0.0.beta1) + actionview (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + nokogiri (>= 1.8.5) + rack (>= 2.2.4) + rack-session (>= 1.0.1) + rack-test (>= 0.6.3) + rails-dom-testing (~> 2.2) + rails-html-sanitizer (~> 1.6) + useragent (~> 0.16) + actiontext (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + activerecord (= 8.0.0.beta1) + activestorage (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + globalid (>= 0.6.0) + nokogiri (>= 1.8.5) + actionview (8.0.0.beta1) + activesupport (= 8.0.0.beta1) + builder (~> 3.1) + erubi (~> 1.11) + rails-dom-testing (~> 2.2) + rails-html-sanitizer (~> 1.6) + activejob (8.0.0.beta1) + activesupport (= 8.0.0.beta1) + globalid (>= 0.3.6) + activemodel (8.0.0.beta1) + activesupport (= 8.0.0.beta1) + activerecord (8.0.0.beta1) + activemodel (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + timeout (>= 0.4.0) + activestorage (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + activejob (= 8.0.0.beta1) + activerecord (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + marcel (~> 1.0) + activesupport (8.0.0.beta1) + base64 + benchmark (>= 0.3) + bigdecimal + concurrent-ruby (~> 1.0, >= 1.3.1) + connection_pool (>= 2.2.5) + drb + i18n (>= 1.6, < 2) + logger (>= 1.4.2) + minitest (>= 5.1) + securerandom (>= 0.3) + tzinfo (~> 2.0, >= 2.0.5) + uri (>= 0.13.1) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + ast (2.4.2) + base64 (0.2.0) + bcrypt_pbkdf (1.1.1) + benchmark (0.3.0) + bigdecimal (3.1.8) + bindex (0.8.1) + bootsnap (1.18.4) + msgpack (~> 1.2) + brakeman (6.2.1) + racc + builder (3.3.0) + capybara (3.40.0) + addressable + matrix + mini_mime (>= 0.1.3) + nokogiri (~> 1.11) + rack (>= 1.6.0) + rack-test (>= 0.6.3) + regexp_parser (>= 1.5, < 3.0) + xpath (~> 3.2) + concurrent-ruby (1.3.4) + connection_pool (2.4.1) + crass (1.0.6) + date (3.3.4) + debug (1.9.2) + irb (~> 1.10) + reline (>= 0.3.8) + dotenv (3.1.4) + drb (2.2.1) + ed25519 (1.3.0) + erubi (1.13.0) + et-orbi (1.2.11) + tzinfo + fugit (1.11.1) + et-orbi (~> 1, >= 1.2.11) + raabro (~> 1.4) + globalid (1.2.1) + activesupport (>= 6.1) + i18n (1.14.6) + concurrent-ruby (~> 1.0) + importmap-rails (2.0.3) + actionpack (>= 6.0.0) + activesupport (>= 6.0.0) + railties (>= 6.0.0) + io-console (0.7.2) + irb (1.14.1) + rdoc (>= 4.0.0) + reline (>= 0.4.2) + jbuilder (2.13.0) + actionview (>= 5.0.0) + activesupport (>= 5.0.0) + json (2.7.2) + kamal (2.2.2) + activesupport (>= 7.0) + base64 (~> 0.2) + bcrypt_pbkdf (~> 1.0) + concurrent-ruby (~> 1.2) + dotenv (~> 3.1) + ed25519 (~> 1.2) + net-ssh (~> 7.0) + sshkit (>= 1.23.0, < 2.0) + thor (~> 1.3) + zeitwerk (~> 2.5) + language_server-protocol (3.17.0.3) + logger (1.6.1) + loofah (2.22.0) + crass (~> 1.0.2) + nokogiri (>= 1.12.0) + mail (2.8.1) + mini_mime (>= 0.1.1) + net-imap + net-pop + net-smtp + marcel (1.0.4) + matrix (0.4.2) + mini_mime (1.1.5) + minitest (5.25.1) + msgpack (1.7.3) + net-imap (0.4.16) + date + net-protocol + net-pop (0.1.2) + net-protocol + net-protocol (0.2.2) + timeout + net-scp (4.0.0) + net-ssh (>= 2.6.5, < 8.0.0) + net-sftp (4.0.0) + net-ssh (>= 5.0.0, < 8.0.0) + net-smtp (0.5.0) + net-protocol + net-ssh (7.3.0) + nio4r (2.7.3) + nokogiri (1.16.7-x86_64-linux) + racc (~> 1.4) + ostruct (0.6.0) + parallel (1.26.3) + parser (3.3.5.0) + ast (~> 2.4.1) + racc + propshaft (1.1.0) + actionpack (>= 7.0.0) + activesupport (>= 7.0.0) + rack + railties (>= 7.0.0) + psych (5.1.2) + stringio + public_suffix (6.0.1) + puma (6.4.3) + nio4r (~> 
2.0) + raabro (1.4.0) + racc (1.8.1) + rack (3.1.7) + rack-session (2.0.0) + rack (>= 3.0.0) + rack-test (2.1.0) + rack (>= 1.3) + rackup (2.1.0) + rack (>= 3) + webrick (~> 1.8) + rails (8.0.0.beta1) + actioncable (= 8.0.0.beta1) + actionmailbox (= 8.0.0.beta1) + actionmailer (= 8.0.0.beta1) + actionpack (= 8.0.0.beta1) + actiontext (= 8.0.0.beta1) + actionview (= 8.0.0.beta1) + activejob (= 8.0.0.beta1) + activemodel (= 8.0.0.beta1) + activerecord (= 8.0.0.beta1) + activestorage (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + bundler (>= 1.15.0) + railties (= 8.0.0.beta1) + rails-dom-testing (2.2.0) + activesupport (>= 5.0.0) + minitest + nokogiri (>= 1.6) + rails-html-sanitizer (1.6.0) + loofah (~> 2.21) + nokogiri (~> 1.14) + railties (8.0.0.beta1) + actionpack (= 8.0.0.beta1) + activesupport (= 8.0.0.beta1) + irb (~> 1.13) + rackup (>= 1.0.0) + rake (>= 12.2) + thor (~> 1.0, >= 1.2.2) + zeitwerk (~> 2.6) + rainbow (3.1.1) + rake (13.2.1) + rdoc (6.7.0) + psych (>= 4.0.0) + regexp_parser (2.9.2) + reline (0.5.10) + io-console (~> 0.5) + rexml (3.3.8) + rubocop (1.66.1) + json (~> 2.3) + language_server-protocol (>= 3.17.0) + parallel (~> 1.10) + parser (>= 3.3.0.2) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 2.4, < 3.0) + rubocop-ast (>= 1.32.2, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 2.4.0, < 3.0) + rubocop-ast (1.32.3) + parser (>= 3.3.1.0) + rubocop-minitest (0.36.0) + rubocop (>= 1.61, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-performance (1.22.1) + rubocop (>= 1.48.1, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-rails (2.26.2) + activesupport (>= 4.2.0) + rack (>= 1.1) + rubocop (>= 1.52.0, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-rails-omakase (1.0.0) + rubocop + rubocop-minitest + rubocop-performance + rubocop-rails + ruby-progressbar (1.13.0) + rubyzip (2.3.2) + securerandom (0.3.1) + selenium-webdriver (4.25.0) + base64 (~> 0.2) + logger (~> 1.4) + rexml (~> 3.2, >= 3.2.5) + rubyzip (>= 1.2.2, < 3.0) + websocket (~> 1.0) + solid_cable (3.0.2) + actioncable (>= 7.2) + activejob (>= 7.2) + activerecord (>= 7.2) + railties (>= 7.2) + solid_cache (1.0.6) + activejob (>= 7.2) + activerecord (>= 7.2) + railties (>= 7.2) + solid_queue (1.0.0) + activejob (>= 7.1) + activerecord (>= 7.1) + concurrent-ruby (>= 1.3.1) + fugit (~> 1.11.0) + railties (>= 7.1) + thor (~> 1.3.1) + sqlite3 (2.1.0-x86_64-linux-gnu) + sshkit (1.23.1) + base64 + net-scp (>= 1.1.2) + net-sftp (>= 2.1.2) + net-ssh (>= 2.8.0) + ostruct + stimulus-rails (1.3.4) + railties (>= 6.0.0) + stringio (3.1.1) + thor (1.3.2) + thruster (0.1.8-x86_64-linux) + timeout (0.4.1) + turbo-rails (2.0.10) + actionpack (>= 6.0.0) + railties (>= 6.0.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + unicode-display_width (2.6.0) + uri (0.13.1) + useragent (0.16.10) + web-console (4.2.1) + actionview (>= 6.0.0) + activemodel (>= 6.0.0) + bindex (>= 0.4.0) + railties (>= 6.0.0) + webrick (1.8.2) + websocket (1.2.11) + websocket-driver (0.7.6) + websocket-extensions (>= 0.1.0) + websocket-extensions (0.1.5) + xpath (3.2.0) + nokogiri (~> 1.8) + zeitwerk (2.7.0) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + bootsnap + brakeman + capybara + debug + importmap-rails + jbuilder + kamal (>= 2.0.0.rc2) + propshaft + puma (>= 5.0) + rails (~> 8.0.0.beta1) + rubocop-rails-omakase + selenium-webdriver + solid_cable + solid_cache + solid_queue + sqlite3 (>= 2.1) + stimulus-rails + thruster + turbo-rails + tzinfo-data + web-console + +BUNDLED WITH + 2.4.19 diff --git 
a/test/fixtures/deploy-rails-8/README.md b/test/fixtures/deploy-rails-8/README.md new file mode 100644 index 0000000000..7db80e4ca1 --- /dev/null +++ b/test/fixtures/deploy-rails-8/README.md @@ -0,0 +1,24 @@ +# README + +This README would normally document whatever steps are necessary to get the +application up and running. + +Things you may want to cover: + +* Ruby version + +* System dependencies + +* Configuration + +* Database creation + +* Database initialization + +* How to run the test suite + +* Services (job queues, cache servers, search engines, etc.) + +* Deployment instructions + +* ... diff --git a/test/fixtures/deploy-rails-8/Rakefile b/test/fixtures/deploy-rails-8/Rakefile new file mode 100644 index 0000000000..9a5ea7383a --- /dev/null +++ b/test/fixtures/deploy-rails-8/Rakefile @@ -0,0 +1,6 @@ +# Add your own tasks in files placed in lib/tasks ending in .rake, +# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake. + +require_relative "config/application" + +Rails.application.load_tasks diff --git a/test/fixtures/deploy-rails-8/app/assets/images/.keep b/test/fixtures/deploy-rails-8/app/assets/images/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/app/assets/stylesheets/application.css b/test/fixtures/deploy-rails-8/app/assets/stylesheets/application.css new file mode 100644 index 0000000000..fe93333c0f --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/assets/stylesheets/application.css @@ -0,0 +1,10 @@ +/* + * This is a manifest file that'll be compiled into application.css. + * + * With Propshaft, assets are served efficiently without preprocessing steps. You can still include + * application-wide styles in this file, but keep in mind that CSS precedence will follow the standard + * cascading order, meaning styles declared later in the document or manifest will override earlier ones, + * depending on specificity. + * + * Consider organizing styles into separate files for maintainability. + */ diff --git a/test/fixtures/deploy-rails-8/app/controllers/application_controller.rb b/test/fixtures/deploy-rails-8/app/controllers/application_controller.rb new file mode 100644 index 0000000000..0d95db22b4 --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/controllers/application_controller.rb @@ -0,0 +1,4 @@ +class ApplicationController < ActionController::Base + # Only allow modern browsers supporting webp images, web push, badges, import maps, CSS nesting, and CSS :has. 
+ allow_browser versions: :modern +end diff --git a/test/fixtures/deploy-rails-8/app/controllers/concerns/.keep b/test/fixtures/deploy-rails-8/app/controllers/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/app/helpers/application_helper.rb b/test/fixtures/deploy-rails-8/app/helpers/application_helper.rb new file mode 100644 index 0000000000..de6be7945c --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/helpers/application_helper.rb @@ -0,0 +1,2 @@ +module ApplicationHelper +end diff --git a/test/fixtures/deploy-rails-8/app/jobs/application_job.rb b/test/fixtures/deploy-rails-8/app/jobs/application_job.rb new file mode 100644 index 0000000000..d394c3d106 --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/jobs/application_job.rb @@ -0,0 +1,7 @@ +class ApplicationJob < ActiveJob::Base + # Automatically retry jobs that encountered a deadlock + # retry_on ActiveRecord::Deadlocked + + # Most jobs are safe to ignore if the underlying records are no longer available + # discard_on ActiveJob::DeserializationError +end diff --git a/test/fixtures/deploy-rails-8/app/mailers/application_mailer.rb b/test/fixtures/deploy-rails-8/app/mailers/application_mailer.rb new file mode 100644 index 0000000000..3c34c8148f --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/mailers/application_mailer.rb @@ -0,0 +1,4 @@ +class ApplicationMailer < ActionMailer::Base + default from: "from@example.com" + layout "mailer" +end diff --git a/test/fixtures/deploy-rails-8/app/models/application_record.rb b/test/fixtures/deploy-rails-8/app/models/application_record.rb new file mode 100644 index 0000000000..b63caeb8a5 --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/models/application_record.rb @@ -0,0 +1,3 @@ +class ApplicationRecord < ActiveRecord::Base + primary_abstract_class +end diff --git a/test/fixtures/deploy-rails-8/app/models/concerns/.keep b/test/fixtures/deploy-rails-8/app/models/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/app/views/layouts/application.html.erb b/test/fixtures/deploy-rails-8/app/views/layouts/application.html.erb new file mode 100644 index 0000000000..2f7747a7b3 --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/views/layouts/application.html.erb @@ -0,0 +1,27 @@ + + + + <%= content_for(:title) || "Helkp" %> + + + + <%= csrf_meta_tags %> + <%= csp_meta_tag %> + + <%= yield :head %> + + <%# Enable PWA manifest for installable apps (make sure to enable in config/routes.rb too!) 
%> + <%#= tag.link rel: "manifest", href: pwa_manifest_path(format: :json) %> + + + + + + <%# Includes all stylesheet files in app/views/stylesheets %> + <%= stylesheet_link_tag :app, "data-turbo-track": "reload" %> + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-8/app/views/layouts/mailer.html.erb b/test/fixtures/deploy-rails-8/app/views/layouts/mailer.html.erb new file mode 100644 index 0000000000..3aac9002ed --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/views/layouts/mailer.html.erb @@ -0,0 +1,13 @@ + + + + + + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-8/app/views/layouts/mailer.text.erb b/test/fixtures/deploy-rails-8/app/views/layouts/mailer.text.erb new file mode 100644 index 0000000000..37f0bddbd7 --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/views/layouts/mailer.text.erb @@ -0,0 +1 @@ +<%= yield %> diff --git a/test/fixtures/deploy-rails-8/app/views/pwa/manifest.json.erb b/test/fixtures/deploy-rails-8/app/views/pwa/manifest.json.erb new file mode 100644 index 0000000000..ed7901c4ff --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/views/pwa/manifest.json.erb @@ -0,0 +1,22 @@ +{ + "name": "Helkp", + "icons": [ + { + "src": "/icon.png", + "type": "image/png", + "sizes": "512x512" + }, + { + "src": "/icon.png", + "type": "image/png", + "sizes": "512x512", + "purpose": "maskable" + } + ], + "start_url": "/", + "display": "standalone", + "scope": "/", + "description": "Helkp.", + "theme_color": "red", + "background_color": "red" +} diff --git a/test/fixtures/deploy-rails-8/app/views/pwa/service-worker.js b/test/fixtures/deploy-rails-8/app/views/pwa/service-worker.js new file mode 100644 index 0000000000..b3a13fb7bb --- /dev/null +++ b/test/fixtures/deploy-rails-8/app/views/pwa/service-worker.js @@ -0,0 +1,26 @@ +// Add a service worker for processing Web Push notifications: +// +// self.addEventListener("push", async (event) => { +// const { title, options } = await event.data.json() +// event.waitUntil(self.registration.showNotification(title, options)) +// }) +// +// self.addEventListener("notificationclick", function(event) { +// event.notification.close() +// event.waitUntil( +// clients.matchAll({ type: "window" }).then((clientList) => { +// for (let i = 0; i < clientList.length; i++) { +// let client = clientList[i] +// let clientPath = (new URL(client.url)).pathname +// +// if (clientPath == event.notification.data.path && "focus" in client) { +// return client.focus() +// } +// } +// +// if (clients.openWindow) { +// return clients.openWindow(event.notification.data.path) +// } +// }) +// ) +// }) diff --git a/test/fixtures/deploy-rails-8/bin/brakeman b/test/fixtures/deploy-rails-8/bin/brakeman new file mode 100755 index 0000000000..ace1c9ba08 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/brakeman @@ -0,0 +1,7 @@ +#!/usr/bin/env ruby +require "rubygems" +require "bundler/setup" + +ARGV.unshift("--ensure-latest") + +load Gem.bin_path("brakeman", "brakeman") diff --git a/test/fixtures/deploy-rails-8/bin/bundle b/test/fixtures/deploy-rails-8/bin/bundle new file mode 100755 index 0000000000..42c7fd7c5c --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/bundle @@ -0,0 +1,109 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'bundle' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require "rubygems" + +m = Module.new do + module_function + + def invoked_as_script? 
+ File.expand_path($0) == File.expand_path(__FILE__) + end + + def env_var_version + ENV["BUNDLER_VERSION"] + end + + def cli_arg_version + return unless invoked_as_script? # don't want to hijack other binstubs + return unless "update".start_with?(ARGV.first || " ") # must be running `bundle update` + bundler_version = nil + update_index = nil + ARGV.each_with_index do |a, i| + if update_index && update_index.succ == i && a =~ Gem::Version::ANCHORED_VERSION_PATTERN + bundler_version = a + end + next unless a =~ /\A--bundler(?:[= ](#{Gem::Version::VERSION_PATTERN}))?\z/ + bundler_version = $1 + update_index = i + end + bundler_version + end + + def gemfile + gemfile = ENV["BUNDLE_GEMFILE"] + return gemfile if gemfile && !gemfile.empty? + + File.expand_path("../Gemfile", __dir__) + end + + def lockfile + lockfile = + case File.basename(gemfile) + when "gems.rb" then gemfile.sub(/\.rb$/, ".locked") + else "#{gemfile}.lock" + end + File.expand_path(lockfile) + end + + def lockfile_version + return unless File.file?(lockfile) + lockfile_contents = File.read(lockfile) + return unless lockfile_contents =~ /\n\nBUNDLED WITH\n\s{2,}(#{Gem::Version::VERSION_PATTERN})\n/ + Regexp.last_match(1) + end + + def bundler_requirement + @bundler_requirement ||= + env_var_version || + cli_arg_version || + bundler_requirement_for(lockfile_version) + end + + def bundler_requirement_for(version) + return "#{Gem::Requirement.default}.a" unless version + + bundler_gem_version = Gem::Version.new(version) + + bundler_gem_version.approximate_recommendation + end + + def load_bundler! + ENV["BUNDLE_GEMFILE"] ||= gemfile + + activate_bundler + end + + def activate_bundler + gem_error = activation_error_handling do + gem "bundler", bundler_requirement + end + return if gem_error.nil? + require_error = activation_error_handling do + require "bundler/version" + end + return if require_error.nil? && Gem::Requirement.new(bundler_requirement).satisfied_by?(Gem::Version.new(Bundler::VERSION)) + warn "Activating bundler (#{bundler_requirement}) failed:\n#{gem_error.message}\n\nTo install the version of bundler this project requires, run `gem install bundler -v '#{bundler_requirement}'`" + exit 42 + end + + def activation_error_handling + yield + nil + rescue StandardError, LoadError => e + e + end +end + +m.load_bundler! + +if m.invoked_as_script? + load Gem.bin_path("bundler", "bundle") +end diff --git a/test/fixtures/deploy-rails-8/bin/dev b/test/fixtures/deploy-rails-8/bin/dev new file mode 100755 index 0000000000..5f91c20545 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/dev @@ -0,0 +1,2 @@ +#!/usr/bin/env ruby +exec "./bin/rails", "server", *ARGV diff --git a/test/fixtures/deploy-rails-8/bin/docker-entrypoint b/test/fixtures/deploy-rails-8/bin/docker-entrypoint new file mode 100755 index 0000000000..57567d69b4 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/docker-entrypoint @@ -0,0 +1,14 @@ +#!/bin/bash -e + +# Enable jemalloc for reduced memory usage and latency. 
+if [ -z "${LD_PRELOAD+x}" ]; then + LD_PRELOAD=$(find /usr/lib -name libjemalloc.so.2 -print -quit) + export LD_PRELOAD +fi + +# If running the rails server then create or migrate existing database +if [ "${@: -2:1}" == "./bin/rails" ] && [ "${@: -1:1}" == "server" ]; then + ./bin/rails db:prepare +fi + +exec "${@}" diff --git a/test/fixtures/deploy-rails-8/bin/kamal b/test/fixtures/deploy-rails-8/bin/kamal new file mode 100755 index 0000000000..cbe59b95ed --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/kamal @@ -0,0 +1,27 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'kamal' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__) + +bundle_binstub = File.expand_path("bundle", __dir__) + +if File.file?(bundle_binstub) + if File.read(bundle_binstub, 300).include?("This file was generated by Bundler") + load(bundle_binstub) + else + abort("Your `bin/bundle` was not generated by Bundler, so this binstub cannot run. +Replace `bin/bundle` by running `bundle binstubs bundler --force`, then run this command again.") + end +end + +require "rubygems" +require "bundler/setup" + +load Gem.bin_path("kamal", "kamal") diff --git a/test/fixtures/deploy-rails-8/bin/rails b/test/fixtures/deploy-rails-8/bin/rails new file mode 100755 index 0000000000..efc0377492 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/rails @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +APP_PATH = File.expand_path("../config/application", __dir__) +require_relative "../config/boot" +require "rails/commands" diff --git a/test/fixtures/deploy-rails-8/bin/rake b/test/fixtures/deploy-rails-8/bin/rake new file mode 100755 index 0000000000..4fbf10b960 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/rake @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +require_relative "../config/boot" +require "rake" +Rake.application.run diff --git a/test/fixtures/deploy-rails-8/bin/rubocop b/test/fixtures/deploy-rails-8/bin/rubocop new file mode 100755 index 0000000000..40330c0ff1 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/rubocop @@ -0,0 +1,8 @@ +#!/usr/bin/env ruby +require "rubygems" +require "bundler/setup" + +# explicit rubocop config increases performance slightly while avoiding config confusion. +ARGV.unshift("--config", File.expand_path("../.rubocop.yml", __dir__)) + +load Gem.bin_path("rubocop", "rubocop") diff --git a/test/fixtures/deploy-rails-8/bin/setup b/test/fixtures/deploy-rails-8/bin/setup new file mode 100755 index 0000000000..15347c597e --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/setup @@ -0,0 +1,35 @@ +#!/usr/bin/env ruby +require "fileutils" + +APP_ROOT = File.expand_path("..", __dir__) +APP_NAME = "helkp" + +def system!(*args) + system(*args, exception: true) +end + +FileUtils.chdir APP_ROOT do + # This script is a way to set up or update your development environment automatically. + # This script is idempotent, so that you can run it at any time and get an expectable outcome. + # Add necessary setup steps to this file. + + puts "== Installing dependencies ==" + system("bundle check") || system!("bundle install") + + # puts "\n== Copying sample files ==" + # unless File.exist?("config/database.yml") + # FileUtils.cp "config/database.yml.sample", "config/database.yml" + # end + + puts "\n== Preparing database ==" + system! "bin/rails db:prepare" + + puts "\n== Removing old logs and tempfiles ==" + system! 
"bin/rails log:clear tmp:clear" + + unless ARGV.include?("--skip-server") + puts "\n== Starting development server ==" + STDOUT.flush # flush the output before exec(2) so that it displays + exec "bin/dev" + end +end diff --git a/test/fixtures/deploy-rails-8/bin/thrust b/test/fixtures/deploy-rails-8/bin/thrust new file mode 100755 index 0000000000..36bde2d832 --- /dev/null +++ b/test/fixtures/deploy-rails-8/bin/thrust @@ -0,0 +1,5 @@ +#!/usr/bin/env ruby +require "rubygems" +require "bundler/setup" + +load Gem.bin_path("thruster", "thrust") diff --git a/test/fixtures/deploy-rails-8/config.ru b/test/fixtures/deploy-rails-8/config.ru new file mode 100644 index 0000000000..4a3c09a688 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config.ru @@ -0,0 +1,6 @@ +# This file is used by Rack-based servers to start the application. + +require_relative "config/environment" + +run Rails.application +Rails.application.load_server diff --git a/test/fixtures/deploy-rails-8/config/application.rb b/test/fixtures/deploy-rails-8/config/application.rb new file mode 100644 index 0000000000..5f1ae82bda --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/application.rb @@ -0,0 +1,27 @@ +require_relative "boot" + +require "rails/all" + +# Require the gems listed in Gemfile, including any gems +# you've limited to :test, :development, or :production. +Bundler.require(*Rails.groups) + +module Helkp + class Application < Rails::Application + # Initialize configuration defaults for originally generated Rails version. + config.load_defaults 8.0 + + # Please, add to the `ignore` list any other `lib` subdirectories that do + # not contain `.rb` files, or that should not be reloaded or eager loaded. + # Common ones are `templates`, `generators`, or `middleware`, for example. + config.autoload_lib(ignore: %w[assets tasks]) + + # Configuration for the application, engines, and railties goes here. + # + # These settings can be overridden in specific environments using the files + # in config/environments, which are processed later. + # + # config.time_zone = "Central Time (US & Canada)" + # config.eager_load_paths << Rails.root.join("extras") + end +end diff --git a/test/fixtures/deploy-rails-8/config/boot.rb b/test/fixtures/deploy-rails-8/config/boot.rb new file mode 100644 index 0000000000..988a5ddc46 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/boot.rb @@ -0,0 +1,4 @@ +ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__) + +require "bundler/setup" # Set up gems listed in the Gemfile. +require "bootsnap/setup" # Speed up boot time by caching expensive operations. 
diff --git a/test/fixtures/deploy-rails-8/config/cable.yml b/test/fixtures/deploy-rails-8/config/cable.yml new file mode 100644 index 0000000000..25abd263fb --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/cable.yml @@ -0,0 +1,10 @@ +development: + adapter: async + +test: + adapter: test + +production: + adapter: redis + url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %> + channel_prefix: helkp_production diff --git a/test/fixtures/deploy-rails-8/config/credentials.yml.enc b/test/fixtures/deploy-rails-8/config/credentials.yml.enc new file mode 100644 index 0000000000..3b76547d41 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/credentials.yml.enc @@ -0,0 +1 @@ +tNT02O8Xba+tAfED29z+BDnG9NgzhlZvQxyMHNJGaLeugvHvfSG5RxO5KmvU7rNE0rWT8F078rP2qtZdxfZs1TH6wmYN4D0VH7CoNYEIz5ddbPDrXR/8689eCLKJlaSHxof/toVhqofCNdBF4l8dlQLWgXaytjpX0YnX/ZVVLo4NaPcH1roAySEJ9ECzy83FiDporIz7LgRJRbCQbj5hBFvHGVevFD3/xOR1d9eTUgT+iBhRBUgT8vb+rkAWPtUIbi2p2KILglrq9D7YMlokWYRUvvuUKS7fMCVPk8josz2L1EhVKd2X+YIAszH2eZKfltYiyfDx3+waLDJVe3dNPoz4scjp5jX0QfB/cxm25w5iuSwmSlJcZKMypO5tZ7pOQ2Rd0vsKo2QrrvI6SGKcIErYCF34HEMIe3aj8WJUB7gsSQXRRcWwwHtKUXsgAuuHRSlUq3EAtqNaoRZdSXBB6zvUa2OIksCPptDYkEcbeH7jnL+eaC+zgjuX--GGzRYapcAGQ8eCga--68JXDOlpBsbTpFUYNJud0g== \ No newline at end of file diff --git a/test/fixtures/deploy-rails-8/config/database.yml b/test/fixtures/deploy-rails-8/config/database.yml new file mode 100644 index 0000000000..2640cb5f30 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/database.yml @@ -0,0 +1,41 @@ +# SQLite. Versions 3.8.0 and up are supported. +# gem install sqlite3 +# +# Ensure the SQLite 3 gem is defined in your Gemfile +# gem "sqlite3" +# +default: &default + adapter: sqlite3 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> + timeout: 5000 + +development: + <<: *default + database: storage/development.sqlite3 + +# Warning: The database defined as "test" will be erased and +# re-generated from your development database when you run "rake". +# Do not set this db to the same as development or production. +test: + <<: *default + database: storage/test.sqlite3 + + +# Store production database in the storage/ directory, which by default +# is mounted as a persistent Docker volume in config/deploy.yml. +production: + primary: + <<: *default + database: storage/production.sqlite3 + cache: + <<: *default + database: storage/production_cache.sqlite3 + migrations_paths: db/cache_migrate + queue: + <<: *default + database: storage/production_queue.sqlite3 + migrations_paths: db/queue_migrate + cable: + <<: *default + database: storage/production_cable.sqlite3 + migrations_paths: db/cable_migrate diff --git a/test/fixtures/deploy-rails-8/config/deploy.yml b/test/fixtures/deploy-rails-8/config/deploy.yml new file mode 100644 index 0000000000..85a234ad61 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/deploy.yml @@ -0,0 +1,112 @@ +# Name of your application. Used to uniquely configure containers. +service: helkp + +# Name of the container image. +image: your-user/helkp + +# Deploy to these servers. +servers: + web: + - 192.168.0.1 + # job: + # hosts: + # - 192.168.0.1 + # cmd: bin/jobs + +# Enable SSL auto certification via Let's Encrypt (and allow for multiple apps on one server). +# Set ssl: false if using something like Cloudflare to terminate SSL (but keep host!). +proxy: + ssl: true + host: app.example.com + +# Credentials for your image host. +registry: + # Specify the registry server, if you're not using Docker Hub + # server: registry.digitalocean.com / ghcr.io / ... 
+ username: your-user + + # Always use an access token rather than real password when possible. + password: + - KAMAL_REGISTRY_PASSWORD + +# Inject ENV variables into containers (secrets come from .kamal/secrets). +env: + secret: + - RAILS_MASTER_KEY + clear: + # Run the Solid Queue Supervisor inside the web server's Puma process to do jobs. + # When you start using multiple servers, you should split out job processing to a dedicated machine. + SOLID_QUEUE_IN_PUMA: true + + # Set number of processes dedicated to Solid Queue (default: 1) + # JOB_CONCURRENCY: 3 + + # Set number of cores available to the application on each server (default: 1). + # WEB_CONCURRENCY: 2 + + # Match this to any external database server to configure Active Record correctly + # DB_HOST: 192.168.0.2 + + # Log everything from Rails + # RAILS_LOG_LEVEL: debug + +# Aliases are triggered with "bin/kamal ". You can overwrite arguments on invocation: +# "bin/kamal logs -r job" will tail logs from the first server in the job section. +aliases: + console: app exec --interactive --reuse "bin/rails console" + shell: app exec --interactive --reuse "bash" + logs: app logs -f + dbc: app exec --interactive --reuse "bin/rails dbconsole" + + +# Use a persistent storage volume for sqlite database files and local Active Storage files. +# Recommended to change this to a mounted volume path that is backed up off server. +volumes: + - "helkp_storage:/rails/storage" + + +# Bridge fingerprinted assets, like JS and CSS, between versions to avoid +# hitting 404 on in-flight requests. Combines all files from new and old +# version inside the asset_path. +asset_path: /rails/public/assets + +# Configure the image builder. +builder: + arch: amd64 + + # # Build image via remote server (useful for faster amd64 builds on arm64 computers) + # remote: ssh://docker@docker-builder-server + # + # # Pass arguments and secrets to the Docker build process + # args: + # RUBY_VERSION: ruby-3.2.3 + # secrets: + # - GITHUB_TOKEN + # - RAILS_MASTER_KEY + +# Use a different ssh user than root +# ssh: +# user: app + +# Use accessory services (secrets come from .kamal/secrets). +# accessories: +# db: +# image: mysql:8.0 +# host: 192.168.0.2 +# port: 3306 +# env: +# clear: +# MYSQL_ROOT_HOST: '%' +# secret: +# - MYSQL_ROOT_PASSWORD +# files: +# - config/mysql/production.cnf:/etc/mysql/my.cnf +# - db/production.sql:/docker-entrypoint-initdb.d/setup.sql +# directories: +# - data:/var/lib/mysql +# redis: +# image: redis:7.0 +# host: 192.168.0.2 +# port: 6379 +# directories: +# - data:/data diff --git a/test/fixtures/deploy-rails-8/config/environment.rb b/test/fixtures/deploy-rails-8/config/environment.rb new file mode 100644 index 0000000000..cac5315775 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/environment.rb @@ -0,0 +1,5 @@ +# Load the Rails application. +require_relative "application" + +# Initialize the Rails application. +Rails.application.initialize! diff --git a/test/fixtures/deploy-rails-8/config/environments/development.rb b/test/fixtures/deploy-rails-8/config/environments/development.rb new file mode 100644 index 0000000000..4cc21c4ebe --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/environments/development.rb @@ -0,0 +1,72 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # Make code changes take effect immediately without server restart. + config.enable_reloading = true + + # Do not eager load code on boot. 
+ config.eager_load = false + + # Show full error reports. + config.consider_all_requests_local = true + + # Enable server timing. + config.server_timing = true + + # Enable/disable Action Controller caching. By default Action Controller caching is disabled. + # Run rails dev:cache to toggle Action Controller caching. + if Rails.root.join("tmp/caching-dev.txt").exist? + config.action_controller.perform_caching = true + config.action_controller.enable_fragment_cache_logging = true + config.public_file_server.headers = { "cache-control" => "public, max-age=#{2.days.to_i}" } + else + config.action_controller.perform_caching = false + end + + # Change to :null_store to avoid any caching. + config.cache_store = :memory_store + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Don't care if the mailer can't send. + config.action_mailer.raise_delivery_errors = false + + # Make template changes take effect immediately. + config.action_mailer.perform_caching = false + + # Set localhost to be used by links generated in mailer templates. + config.action_mailer.default_url_options = { host: "localhost", port: 3000 } + + # Print deprecation notices to the Rails logger. + config.active_support.deprecation = :log + + # Raise an error on page load if there are pending migrations. + config.active_record.migration_error = :page_load + + # Highlight code that triggered database queries in logs. + config.active_record.verbose_query_logs = true + + # Append comments with runtime information tags to SQL queries in logs. + config.active_record.query_log_tags_enabled = true + + # Highlight code that enqueued background job in logs. + config.active_job.verbose_enqueue_logs = true + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + config.action_view.annotate_rendered_view_with_filenames = true + + # Uncomment if you wish to allow Action Cable access from any origin. + # config.action_cable.disable_request_forgery_protection = true + + # Raise error when a before_action's only/except options reference missing actions. + config.action_controller.raise_on_missing_callback_actions = true + + # Apply autocorrection by RuboCop to files generated by `bin/rails generate`. + # config.generators.apply_rubocop_autocorrect_after_generate! +end diff --git a/test/fixtures/deploy-rails-8/config/environments/production.rb b/test/fixtures/deploy-rails-8/config/environments/production.rb new file mode 100644 index 0000000000..1749607768 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/environments/production.rb @@ -0,0 +1,89 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # Code is not reloaded between requests. + config.enable_reloading = false + + # Eager load code on boot for better performance and memory savings (ignored by Rake tasks). + config.eager_load = true + + # Full error reports are disabled. + config.consider_all_requests_local = false + + # Turn on fragment caching in view templates. + config.action_controller.perform_caching = true + + # Cache assets for far-future expiry since they are all digest stamped. + config.public_file_server.headers = { "cache-control" => "public, max-age=#{1.year.to_i}" } + + # Enable serving of images, stylesheets, and JavaScripts from an asset server. 
+ # config.asset_host = "http://assets.example.com" + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Assume all access to the app is happening through a SSL-terminating reverse proxy. + config.assume_ssl = true + + # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. + config.force_ssl = true + + # Skip http-to-https redirect for the default health check endpoint. + # config.ssl_options = { redirect: { exclude: ->(request) { request.path == "/up" } } } + + # Log to STDOUT with the current request id as a default log tag. + config.log_tags = [ :request_id ] + config.logger = ActiveSupport::TaggedLogging.logger(STDOUT) + + # Change to "debug" to log everything (including potentially personally-identifiable information!) + config.log_level = ENV.fetch("RAILS_LOG_LEVEL", "info") + + # Prevent health checks from clogging up the logs. + config.silence_healthcheck_path = "/up" + + # Don't log any deprecations. + config.active_support.report_deprecations = false + + # Replace the default in-process memory cache store with a durable alternative. + # config.cache_store = :mem_cache_store + + # Replace the default in-process and non-durable queuing backend for Active Job. + # config.active_job.queue_adapter = :resque + + # Ignore bad email addresses and do not raise email delivery errors. + # Set this to true and configure the email server for immediate delivery to raise delivery errors. + # config.action_mailer.raise_delivery_errors = false + + # Set host to be used by links generated in mailer templates. + config.action_mailer.default_url_options = { host: "example.com" } + + # Specify outgoing SMTP server. Remember to add smtp/* credentials via rails credentials:edit. + # config.action_mailer.smtp_settings = { + # user_name: Rails.application.credentials.dig(:smtp, :user_name), + # password: Rails.application.credentials.dig(:smtp, :password), + # address: "smtp.example.com", + # port: 587, + # authentication: :plain + # } + + # Enable locale fallbacks for I18n (makes lookups for any locale fall back to + # the I18n.default_locale when a translation cannot be found). + config.i18n.fallbacks = true + + # Do not dump schema after migrations. + config.active_record.dump_schema_after_migration = false + + # Only use :id for inspections in production. + config.active_record.attributes_for_inspect = [ :id ] + + # Enable DNS rebinding protection and other `Host` header attacks. + # config.hosts = [ + # "example.com", # Allow requests from example.com + # /.*\.example\.com/ # Allow requests from subdomains like `www.example.com` + # ] + # + # Skip DNS rebinding protection for the default health check endpoint. + # config.host_authorization = { exclude: ->(request) { request.path == "/up" } } +end diff --git a/test/fixtures/deploy-rails-8/config/environments/test.rb b/test/fixtures/deploy-rails-8/config/environments/test.rb new file mode 100644 index 0000000000..c2095b1174 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/environments/test.rb @@ -0,0 +1,53 @@ +# The test environment is used exclusively to run your application's +# test suite. You never need to work with it otherwise. Remember that +# your test database is "scratch space" for the test suite and is wiped +# and recreated between test runs. Don't rely on the data there! + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. 
+ + # While tests run files are not watched, reloading is not necessary. + config.enable_reloading = false + + # Eager loading loads your entire application. When running a single test locally, + # this is usually not necessary, and can slow down your test suite. However, it's + # recommended that you enable it in continuous integration systems to ensure eager + # loading is working properly before deploying your code. + config.eager_load = ENV["CI"].present? + + # Configure public file server for tests with cache-control for performance. + config.public_file_server.headers = { "cache-control" => "public, max-age=3600" } + + # Show full error reports. + config.consider_all_requests_local = true + config.cache_store = :null_store + + # Render exception templates for rescuable exceptions and raise for other exceptions. + config.action_dispatch.show_exceptions = :rescuable + + # Disable request forgery protection in test environment. + config.action_controller.allow_forgery_protection = false + + # Store uploaded files on the local file system in a temporary directory. + config.active_storage.service = :test + + # Tell Action Mailer not to deliver emails to the real world. + # The :test delivery method accumulates sent emails in the + # ActionMailer::Base.deliveries array. + config.action_mailer.delivery_method = :test + + # Set host to be used by links generated in mailer templates. + config.action_mailer.default_url_options = { host: "example.com" } + + # Print deprecation notices to the stderr. + config.active_support.deprecation = :stderr + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true + + # Raise error when a before_action's only/except options reference missing actions. + config.action_controller.raise_on_missing_callback_actions = true +end diff --git a/test/fixtures/deploy-rails-8/config/initializers/assets.rb b/test/fixtures/deploy-rails-8/config/initializers/assets.rb new file mode 100644 index 0000000000..487324424f --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/initializers/assets.rb @@ -0,0 +1,7 @@ +# Be sure to restart your server when you modify this file. + +# Version of your assets, change this if you want to expire all your assets. +Rails.application.config.assets.version = "1.0" + +# Add additional assets to the asset load path. +# Rails.application.config.assets.paths << Emoji.images_path diff --git a/test/fixtures/deploy-rails-8/config/initializers/content_security_policy.rb b/test/fixtures/deploy-rails-8/config/initializers/content_security_policy.rb new file mode 100644 index 0000000000..b3076b38fe --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/initializers/content_security_policy.rb @@ -0,0 +1,25 @@ +# Be sure to restart your server when you modify this file. + +# Define an application-wide content security policy. 
+# See the Securing Rails Applications Guide for more information: +# https://guides.rubyonrails.org/security.html#content-security-policy-header + +# Rails.application.configure do +# config.content_security_policy do |policy| +# policy.default_src :self, :https +# policy.font_src :self, :https, :data +# policy.img_src :self, :https, :data +# policy.object_src :none +# policy.script_src :self, :https +# policy.style_src :self, :https +# # Specify URI for violation reports +# # policy.report_uri "/csp-violation-report-endpoint" +# end +# +# # Generate session nonces for permitted importmap, inline scripts, and inline styles. +# config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s } +# config.content_security_policy_nonce_directives = %w(script-src style-src) +# +# # Report violations without enforcing the policy. +# # config.content_security_policy_report_only = true +# end diff --git a/test/fixtures/deploy-rails-8/config/initializers/filter_parameter_logging.rb b/test/fixtures/deploy-rails-8/config/initializers/filter_parameter_logging.rb new file mode 100644 index 0000000000..c0b717f7ec --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/initializers/filter_parameter_logging.rb @@ -0,0 +1,8 @@ +# Be sure to restart your server when you modify this file. + +# Configure parameters to be partially matched (e.g. passw matches password) and filtered from the log file. +# Use this to limit dissemination of sensitive information. +# See the ActiveSupport::ParameterFilter documentation for supported notations and behaviors. +Rails.application.config.filter_parameters += [ + :passw, :email, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn, :cvv, :cvc +] diff --git a/test/fixtures/deploy-rails-8/config/initializers/inflections.rb b/test/fixtures/deploy-rails-8/config/initializers/inflections.rb new file mode 100644 index 0000000000..3860f659ea --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/initializers/inflections.rb @@ -0,0 +1,16 @@ +# Be sure to restart your server when you modify this file. + +# Add new inflection rules using the following format. Inflections +# are locale specific, and you may define rules for as many different +# locales as you wish. All of these examples are active by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.plural /^(ox)$/i, "\\1en" +# inflect.singular /^(ox)en/i, "\\1" +# inflect.irregular "person", "people" +# inflect.uncountable %w( fish sheep ) +# end + +# These inflection rules are supported but not enabled by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.acronym "RESTful" +# end diff --git a/test/fixtures/deploy-rails-8/config/locales/en.yml b/test/fixtures/deploy-rails-8/config/locales/en.yml new file mode 100644 index 0000000000..6c349ae5e3 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/locales/en.yml @@ -0,0 +1,31 @@ +# Files in the config/locales directory are used for internationalization and +# are automatically loaded by Rails. If you want to use locales other than +# English, add the necessary files in this directory. +# +# To use the locales, use `I18n.t`: +# +# I18n.t "hello" +# +# In views, this is aliased to just `t`: +# +# <%= t("hello") %> +# +# To use a different locale, set it with `I18n.locale`: +# +# I18n.locale = :es +# +# This would use the information in config/locales/es.yml. +# +# To learn more about the API, please read the Rails Internationalization guide +# at https://guides.rubyonrails.org/i18n.html. 
+# +# Be aware that YAML interprets the following case-insensitive strings as +# booleans: `true`, `false`, `on`, `off`, `yes`, `no`. Therefore, these strings +# must be quoted to be interpreted as strings. For example: +# +# en: +# "yes": yup +# enabled: "ON" + +en: + hello: "Hello world" diff --git a/test/fixtures/deploy-rails-8/config/puma.rb b/test/fixtures/deploy-rails-8/config/puma.rb new file mode 100644 index 0000000000..a248513b24 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/puma.rb @@ -0,0 +1,41 @@ +# This configuration file will be evaluated by Puma. The top-level methods that +# are invoked here are part of Puma's configuration DSL. For more information +# about methods provided by the DSL, see https://puma.io/puma/Puma/DSL.html. +# +# Puma starts a configurable number of processes (workers) and each process +# serves each request in a thread from an internal thread pool. +# +# You can control the number of workers using ENV["WEB_CONCURRENCY"]. You +# should only set this value when you want to run 2 or more workers. The +# default is already 1. +# +# The ideal number of threads per worker depends both on how much time the +# application spends waiting for IO operations and on how much you wish to +# prioritize throughput over latency. +# +# As a rule of thumb, increasing the number of threads will increase how much +# traffic a given process can handle (throughput), but due to CRuby's +# Global VM Lock (GVL) it has diminishing returns and will degrade the +# response time (latency) of the application. +# +# The default is set to 3 threads as it's deemed a decent compromise between +# throughput and latency for the average Rails application. +# +# Any libraries that use a connection pool or another resource pool should +# be configured to provide at least as many connections as the number of +# threads. This includes Active Record's `pool` parameter in `database.yml`. +threads_count = ENV.fetch("RAILS_MAX_THREADS", 3) +threads threads_count, threads_count + +# Specifies the `port` that Puma will listen on to receive requests; default is 3000. +port ENV.fetch("PORT", 3000) + +# Allow puma to be restarted by `bin/rails restart` command. +plugin :tmp_restart + +# Run the Solid Queue supervisor inside of Puma for single-server deployments +plugin :solid_queue if ENV["SOLID_QUEUE_IN_PUMA"] + +# Specify the PID file. Defaults to tmp/pids/server.pid in development. +# In other environments, only set the PID file if requested. +pidfile ENV["PIDFILE"] if ENV["PIDFILE"] diff --git a/test/fixtures/deploy-rails-8/config/routes.rb b/test/fixtures/deploy-rails-8/config/routes.rb new file mode 100644 index 0000000000..48254e88ed --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/routes.rb @@ -0,0 +1,14 @@ +Rails.application.routes.draw do + # Define your application routes per the DSL in https://guides.rubyonrails.org/routing.html + + # Reveal health status on /up that returns 200 if the app boots with no exceptions, otherwise 500. + # Can be used by load balancers and uptime monitors to verify that the app is live. 
+ get "up" => "rails/health#show", as: :rails_health_check + + # Render dynamic PWA files from app/views/pwa/* (remember to link manifest in application.html.erb) + # get "manifest" => "rails/pwa#manifest", as: :pwa_manifest + # get "service-worker" => "rails/pwa#service_worker", as: :pwa_service_worker + + # Defines the root path route ("/") + # root "posts#index" +end diff --git a/test/fixtures/deploy-rails-8/config/storage.yml b/test/fixtures/deploy-rails-8/config/storage.yml new file mode 100644 index 0000000000..4942ab6694 --- /dev/null +++ b/test/fixtures/deploy-rails-8/config/storage.yml @@ -0,0 +1,34 @@ +test: + service: Disk + root: <%= Rails.root.join("tmp/storage") %> + +local: + service: Disk + root: <%= Rails.root.join("storage") %> + +# Use bin/rails credentials:edit to set the AWS secrets (as aws:access_key_id|secret_access_key) +# amazon: +# service: S3 +# access_key_id: <%= Rails.application.credentials.dig(:aws, :access_key_id) %> +# secret_access_key: <%= Rails.application.credentials.dig(:aws, :secret_access_key) %> +# region: us-east-1 +# bucket: your_own_bucket-<%= Rails.env %> + +# Remember not to checkin your GCS keyfile to a repository +# google: +# service: GCS +# project: your_project +# credentials: <%= Rails.root.join("path/to/gcs.keyfile") %> +# bucket: your_own_bucket-<%= Rails.env %> + +# Use bin/rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key) +# microsoft: +# service: AzureStorage +# storage_account_name: your_account_name +# storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %> +# container: your_container_name-<%= Rails.env %> + +# mirror: +# service: Mirror +# primary: local +# mirrors: [ amazon, google, microsoft ] diff --git a/test/fixtures/deploy-rails-8/db/seeds.rb b/test/fixtures/deploy-rails-8/db/seeds.rb new file mode 100644 index 0000000000..4fbd6ed970 --- /dev/null +++ b/test/fixtures/deploy-rails-8/db/seeds.rb @@ -0,0 +1,9 @@ +# This file should ensure the existence of records required to run the application in every environment (production, +# development, test). The code here should be idempotent so that it can be executed at any point in every environment. +# The data can then be loaded with the bin/rails db:seed command (or created alongside the database with db:setup). +# +# Example: +# +# ["Action", "Comedy", "Drama", "Horror"].each do |genre_name| +# MovieGenre.find_or_create_by!(name: genre_name) +# end diff --git a/test/fixtures/deploy-rails-8/lib/tasks/.keep b/test/fixtures/deploy-rails-8/lib/tasks/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/log/.keep b/test/fixtures/deploy-rails-8/log/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/public/400.html b/test/fixtures/deploy-rails-8/public/400.html new file mode 100644 index 0000000000..282dbc8cc9 --- /dev/null +++ b/test/fixtures/deploy-rails-8/public/400.html @@ -0,0 +1,114 @@ + + + + + + + The server cannot process the request due to a client error (400 Bad Request) + + + + + + + + + + + + + +
+    (Default Rails 400 error page; full HTML markup omitted. Visible text:)
+    The server cannot process the request due to a client error. Please check the request and try again. If you’re the application owner check the logs for more information.
diff --git a/test/fixtures/deploy-rails-8/public/404.html b/test/fixtures/deploy-rails-8/public/404.html
new file mode 100644
index 0000000000..c0670bc877
--- /dev/null
+++ b/test/fixtures/deploy-rails-8/public/404.html
@@ -0,0 +1,114 @@
+    (Default Rails 404 error page, titled "The page you were looking for doesn’t exist (404 Not found)"; full HTML markup omitted. Visible text:)
+    The page you were looking for doesn’t exist. You may have mistyped the address or the page may have moved. If you’re the application owner check the logs for more information.
diff --git a/test/fixtures/deploy-rails-8/public/406-unsupported-browser.html b/test/fixtures/deploy-rails-8/public/406-unsupported-browser.html
new file mode 100644
index 0000000000..9532a9ccd0
--- /dev/null
+++ b/test/fixtures/deploy-rails-8/public/406-unsupported-browser.html
@@ -0,0 +1,114 @@
+    (Default Rails 406 error page, titled "Your browser is not supported (406 Not Acceptable)"; full HTML markup omitted. Visible text:)
+    Your browser is not supported.
+    Please upgrade your browser to continue.
diff --git a/test/fixtures/deploy-rails-8/public/422.html b/test/fixtures/deploy-rails-8/public/422.html
new file mode 100644
index 0000000000..8bcf06014f
--- /dev/null
+++ b/test/fixtures/deploy-rails-8/public/422.html
@@ -0,0 +1,114 @@
+    (Default Rails 422 error page, titled "The change you wanted was rejected (422 Unprocessable Entity)"; full HTML markup omitted. Visible text:)
+    The change you wanted was rejected. Maybe you tried to change something you didn’t have access to. If you’re the application owner check the logs for more information.
diff --git a/test/fixtures/deploy-rails-8/public/500.html b/test/fixtures/deploy-rails-8/public/500.html
new file mode 100644
index 0000000000..d77718c3a4
--- /dev/null
+++ b/test/fixtures/deploy-rails-8/public/500.html
@@ -0,0 +1,114 @@
+    (Default Rails 500 error page, titled "We’re sorry, but something went wrong (500 Internal Server Error)"; full HTML markup omitted. Visible text:)
+    We’re sorry, but something went wrong.
+    If you’re the application owner check the logs for more information.
diff --git a/test/fixtures/deploy-rails-8/public/icon.png b/test/fixtures/deploy-rails-8/public/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..c4c9dbfbbd2f7c1421ffd5727188146213abbcef
GIT binary patch
literal 4166
(base85-encoded PNG data omitted)
literal 0
HcmV?d00001

diff --git a/test/fixtures/deploy-rails-8/public/icon.svg b/test/fixtures/deploy-rails-8/public/icon.svg
new file mode 100644
index 0000000000..04b34bf83f
--- /dev/null
+++ b/test/fixtures/deploy-rails-8/public/icon.svg
@@
-0,0 +1,3 @@ + + + diff --git a/test/fixtures/deploy-rails-8/public/robots.txt b/test/fixtures/deploy-rails-8/public/robots.txt new file mode 100644 index 0000000000..c19f78ab68 --- /dev/null +++ b/test/fixtures/deploy-rails-8/public/robots.txt @@ -0,0 +1 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file diff --git a/test/fixtures/deploy-rails-8/script/.keep b/test/fixtures/deploy-rails-8/script/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/storage/.keep b/test/fixtures/deploy-rails-8/storage/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/application_system_test_case.rb b/test/fixtures/deploy-rails-8/test/application_system_test_case.rb new file mode 100644 index 0000000000..cee29fd214 --- /dev/null +++ b/test/fixtures/deploy-rails-8/test/application_system_test_case.rb @@ -0,0 +1,5 @@ +require "test_helper" + +class ApplicationSystemTestCase < ActionDispatch::SystemTestCase + driven_by :selenium, using: :headless_chrome, screen_size: [ 1400, 1400 ] +end diff --git a/test/fixtures/deploy-rails-8/test/controllers/.keep b/test/fixtures/deploy-rails-8/test/controllers/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/fixtures/files/.keep b/test/fixtures/deploy-rails-8/test/fixtures/files/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/helpers/.keep b/test/fixtures/deploy-rails-8/test/helpers/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/integration/.keep b/test/fixtures/deploy-rails-8/test/integration/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/mailers/.keep b/test/fixtures/deploy-rails-8/test/mailers/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/models/.keep b/test/fixtures/deploy-rails-8/test/models/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/system/.keep b/test/fixtures/deploy-rails-8/test/system/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-8/test/test_helper.rb b/test/fixtures/deploy-rails-8/test/test_helper.rb new file mode 100644 index 0000000000..0c22470ec1 --- /dev/null +++ b/test/fixtures/deploy-rails-8/test/test_helper.rb @@ -0,0 +1,15 @@ +ENV["RAILS_ENV"] ||= "test" +require_relative "../config/environment" +require "rails/test_help" + +module ActiveSupport + class TestCase + # Run tests in parallel with specified workers + parallelize(workers: :number_of_processors) + + # Setup all fixtures in test/fixtures/*.yml for all tests in alphabetical order. + fixtures :all + + # Add more helper methods to be used by all tests here... 
+ end +end diff --git a/test/fixtures/deploy-rails-8/vendor/.keep b/test/fixtures/deploy-rails-8/vendor/.keep new file mode 100644 index 0000000000..e69de29bb2 From 6e12d09b74377e4a94ccd989fe6c9e9e164edf2c Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Sat, 12 Oct 2024 10:29:08 -0400 Subject: [PATCH 068/104] use bash_profile instead of profile since we're using a bash login shell --- deployer.Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 3fe58edb91..f2c5205a56 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -44,13 +44,13 @@ RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A170311380 # install mise RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ - echo -e "\n\nexport PATH=\"$PATH:$HOME/.local/bin:$HOME/.local/share/mise/shims\"" >> ~/.profile + echo -e "\n\nexport PATH=\"$HOME/.local/bin:$HOME/.local/share/mise/shims:$PATH\"" >> ~/.bash_profile ENV MISE_PYTHON_COMPILE=false # install asdf, its plugins and dependencies RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 && \ - echo -e "\n\n## Configure ASDF \n. $HOME/.asdf/asdf.sh" >> ~/.profile && \ + echo -e "\n\n## Configure ASDF \n. $HOME/.asdf/asdf.sh" >> ~/.bash_profile && \ source $HOME/.asdf/asdf.sh && \ # nodejs curl -L https://github.com/nodenv/node-build/archive/refs/tags/v$NODE_BUILD_VERSION.tar.gz -o node-build.tar.gz && \ From 68e01e2dc0675e208bde6bab13350edc919cdbf3 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Sat, 12 Oct 2024 11:18:22 -0400 Subject: [PATCH 069/104] install rvm after mise... --- deployer.Dockerfile | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index f2c5205a56..ef88869421 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -29,10 +29,11 @@ ENV DEFAULT_RUBY_VERSION=3.1.6 \ ARG NODE_BUILD_VERSION=5.3.8 -# install a ruby to run the initial script -# RUN echo 'source "/etc/profile.d/rvm.sh"' >> ~/.bashrc && \ -# usermod -a -G rvm root && \ -# rvm install $DEFAULT_RUBY_VERSION && rvm --default use $DEFAULT_RUBY_VERSION && gem update --system && gem install bundler +# install mise +RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ + echo -e "\n\nexport PATH=\"$HOME/.local/bin:$HOME/.local/share/mise/shims:$PATH\"" >> ~/.bash_profile + +ENV MISE_PYTHON_COMPILE=false RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB && \ curl -sSL https://get.rvm.io | bash -s stable && \ @@ -42,12 +43,6 @@ RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 409B6B1796C275462A170311380 echo -e "\nsource /etc/profile.d/rvm.sh" >> ~/.bash_profile && \ echo -e "\nrvm use default &> /dev/null" >> ~/.bash_profile -# install mise -RUN curl https://mise.run | MISE_VERSION=v2024.8.6 sh && \ - echo -e "\n\nexport PATH=\"$HOME/.local/bin:$HOME/.local/share/mise/shims:$PATH\"" >> ~/.bash_profile - -ENV MISE_PYTHON_COMPILE=false - # install asdf, its plugins and dependencies RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 && \ echo -e "\n\n## Configure ASDF \n. 
$HOME/.asdf/asdf.sh" >> ~/.bash_profile && \ From 1329ad08cbd7c80df0c4ebc1a0de0ca1c57f06d9 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 14 Oct 2024 15:24:39 -0400 Subject: [PATCH 070/104] set node.js for nextjs, nuxt and redwood projects --- scanner/nextjs.go | 5 +++++ scanner/nuxtjs.go | 5 +++++ scanner/python.go | 6 ++++++ scanner/redwood.go | 5 +++++ scanner/rust.go | 2 ++ 5 files changed, 23 insertions(+) diff --git a/scanner/nextjs.go b/scanner/nextjs.go index af4001087e..e45e2834cd 100644 --- a/scanner/nextjs.go +++ b/scanner/nextjs.go @@ -22,5 +22,10 @@ func configureNextJs(sourceDir string, config *ScannerConfig) (*SourceInfo, erro "NEXT_PUBLIC_EXAMPLE": "Value goes here", } + // detect node.js version properly... + if nodeS, err := configureNode(sourceDir, config); err == nil && nodeS != nil { + s.Runtime = nodeS.Runtime + } + return s, nil } diff --git a/scanner/nuxtjs.go b/scanner/nuxtjs.go index 087f5792a4..5170089eba 100644 --- a/scanner/nuxtjs.go +++ b/scanner/nuxtjs.go @@ -18,5 +18,10 @@ func configureNuxt(sourceDir string, config *ScannerConfig) (*SourceInfo, error) s.Files = templates("templates/nuxtjs") + // detect node.js version properly... + if nodeS, err := configureNode(sourceDir, config); err == nil && nodeS != nil { + s.Runtime = nodeS.Runtime + } + return s, nil } diff --git a/scanner/python.go b/scanner/python.go index b832d86d8d..addfcfeef6 100644 --- a/scanner/python.go +++ b/scanner/python.go @@ -345,6 +345,11 @@ func configurePython(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { return nil, nil } + pythonVersion, _, err := extractPythonVersion() + if err != nil { + return nil, err + } + s := &SourceInfo{ Files: templates("templates/python"), Builder: "paketobuildpacks/builder:base", @@ -355,6 +360,7 @@ func configurePython(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { }, SkipDeploy: true, DeployDocs: `We have generated a simple Procfile for you. Modify it to fit your needs and run "fly deploy" to deploy your application.`, + Runtime: plan.RuntimeStruct{Language: "python", Version: pythonVersion}, } return s, nil diff --git a/scanner/redwood.go b/scanner/redwood.go index 0b67010629..0ac13967d9 100644 --- a/scanner/redwood.go +++ b/scanner/redwood.go @@ -30,5 +30,10 @@ func configureRedwood(sourceDir string, config *ScannerConfig) (*SourceInfo, err s.Notice = "\nThis deployment will run an SQLite on a single dedicated volume. The app can't scale beyond a single instance. Look into 'fly postgres' for a more robust production database that supports scaling up. \n" } + // detect node.js version properly... 
+ if nodeS, err := configureNode(sourceDir, config); err == nil && nodeS != nil { + s.Runtime = nodeS.Runtime + } + return s, nil } diff --git a/scanner/rust.go b/scanner/rust.go index 85babb1b0c..50b02499a6 100644 --- a/scanner/rust.go +++ b/scanner/rust.go @@ -43,5 +43,7 @@ func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { SkipDatabase: true, Runtime: plan.RuntimeStruct{Language: "rust"}, } + + // TODO: check Cargo.toml's `package.rust-version` and rust-toolchain.toml's `toolchain.channel` return s, nil } From 2f3878eb376339e10037a6c2ac97bc76aa106358 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 09:59:58 -0400 Subject: [PATCH 071/104] install a default node.js version if anything needs it --- deployer.Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index ef88869421..757e36cee6 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -52,6 +52,7 @@ RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 & tar -xzf node-build.tar.gz && \ env PREFIX=/usr/local ./node-build-$NODE_BUILD_VERSION/install.sh && \ asdf plugin add nodejs https://github.com/asdf-vm/asdf-nodejs.git && \ + asdf install nodejs $DEFAULT_NODE_VERSION && asdf global nodejs $DEFAULT_NODE_VERSION && \ # elixir asdf plugin-add erlang https://github.com/michallepicki/asdf-erlang-prebuilt-ubuntu-20.04.git && \ echo -e "local.hex\nlocal.rebar" > $HOME/.default-mix-commands && \ From 14739e4e6d0de4fb6bd79253c13868846519b953 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 14:29:08 -0400 Subject: [PATCH 072/104] switch to node 20 by default --- deployer.Dockerfile | 2 +- test/fixtures/deploy-node-no-dockerfile/index.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 757e36cee6..14a91ea1c5 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -20,7 +20,7 @@ RUN git config --global advice.detachedHead false && \ git config --global init.defaultBranch main ENV DEFAULT_RUBY_VERSION=3.1.6 \ - DEFAULT_NODE_VERSION=18.16.0 \ + DEFAULT_NODE_VERSION=20.18.0 \ DEFAULT_ERLANG_VERSION=26.2.5.2 \ DEFAULT_ELIXIR_VERSION=1.16 \ DEFAULT_BUN_VERSION=1.1.24 \ diff --git a/test/fixtures/deploy-node-no-dockerfile/index.js b/test/fixtures/deploy-node-no-dockerfile/index.js index ee281aeee0..0d3cbf6f42 100644 --- a/test/fixtures/deploy-node-no-dockerfile/index.js +++ b/test/fixtures/deploy-node-no-dockerfile/index.js @@ -10,4 +10,4 @@ http.createServer((request, response) => { response.write("Hello, World!"); response.end(); -}).listen(8080); +}).listen(3000); From a46108d6b8b18b3e78f793db7c86bd1934be9fa9 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 15:06:47 -0400 Subject: [PATCH 073/104] maybe delete the container more proactively --- test/testlib/deployer.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 06d86fd9bb..b33d379558 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -260,6 +260,12 @@ func (d *DeployTestRun) Start(ctx context.Context) error { d.waitErrCh = make(chan error, 1) go func() { + defer d.dockerClient.ContainerRemove(context.TODO(), cont.ID, container.RemoveOptions{ + RemoveVolumes: true, + RemoveLinks: true, + Force: true, + }) + defer d.Close() logs, err := d.dockerClient.ContainerLogs(context.Background(), cont.ID, container.LogsOptions{ @@ -395,11 
+401,7 @@ func (d *DeployTestRun) Output() *DeployerOut { } func (d *DeployTestRun) Close() error { - return d.dockerClient.ContainerRemove(context.TODO(), d.containerID, container.RemoveOptions{ - RemoveVolumes: true, - RemoveLinks: true, - Force: true, - }) + return nil } type log struct { From 11ed89fd6bbe9583c9e7f99ddc590d9aa986b4bc Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 15:24:03 -0400 Subject: [PATCH 074/104] run deployer tests as root --- .github/workflows/deployer-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deployer-tests.yml b/.github/workflows/deployer-tests.yml index 9d5b034017..43eebefea8 100644 --- a/.github/workflows/deployer-tests.yml +++ b/.github/workflows/deployer-tests.yml @@ -91,7 +91,7 @@ jobs: run: | export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT - ./scripts/deployer-tests.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT + sudo -E ./scripts/deployer-tests.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT - name: Post failure to slack if: ${{ github.ref == 'refs/heads/master' && failure() }} uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 From 0044e780964ab656946f706660369045640f1d75 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 15:27:58 -0400 Subject: [PATCH 075/104] also install gotessplit as root --- .github/workflows/deployer-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deployer-tests.yml b/.github/workflows/deployer-tests.yml index 43eebefea8..ced132901d 100644 --- a/.github/workflows/deployer-tests.yml +++ b/.github/workflows/deployer-tests.yml @@ -65,7 +65,7 @@ jobs: run: echo "name=version::$(go env GOVERSION)" >> $GITHUB_OUTPUT - name: Install gotesplit, set FLY_PREFLIGHT_TEST_APP_PREFIX run: | - curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s + curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sudo -E sh -s echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" - uses: actions/download-artifact@v4 with: From b2eacad3afa07d9101660788374bad3aac68fb15 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 15 Oct 2024 17:03:55 -0400 Subject: [PATCH 076/104] add a cleanup env var, mostly useful in tests --- .github/workflows/deployer-tests.yml | 4 ++-- deploy.rb | 9 +++++++++ test/deployer/deployer_test.go | 14 ++++++++++++++ test/testlib/deployer.go | 10 ++++++++++ 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deployer-tests.yml b/.github/workflows/deployer-tests.yml index ced132901d..9d5b034017 100644 --- a/.github/workflows/deployer-tests.yml +++ b/.github/workflows/deployer-tests.yml @@ -65,7 +65,7 @@ jobs: run: echo "name=version::$(go env GOVERSION)" >> $GITHUB_OUTPUT - name: Install gotesplit, set FLY_PREFLIGHT_TEST_APP_PREFIX run: | - curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sudo -E sh -s + curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" - uses: actions/download-artifact@v4 with: @@ -91,7 +91,7 @@ jobs: run: | export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT - sudo -E ./scripts/deployer-tests.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism 
}}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT + ./scripts/deployer-tests.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT - name: Post failure to slack if: ${{ github.ref == 'refs/heads/master' && failure() }} uses: slackapi/slack-github-action@37ebaef184d7626c5f204ab8d3baff4262dd30f0 diff --git a/deploy.rb b/deploy.rb index 10b1567f03..4be91478b7 100755 --- a/deploy.rb +++ b/deploy.rb @@ -423,4 +423,13 @@ end end +if !get_env("DEPLOYER_CLEANUP_BEFORE_EXIT").nil? + if GIT_REPO + `git clean -f -x -d` + else + Dir.chdir("..") + `rm -rf app/*` + end +end + event :end, { ts: ts() } \ No newline at end of file diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index d4776442bf..998439c9a6 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -6,6 +6,7 @@ package deployer import ( "context" "fmt" + "os" "testing" "github.com/stretchr/testify/require" @@ -165,6 +166,7 @@ func TestLaunchRails8(t *testing.T) { testlib.WithouExtensions, testlib.DeployNow, withWorkDirAppSource, + testlib.CleanupBeforeExit, ) manifest, err := deploy.Output().ArtifactManifest() @@ -177,6 +179,18 @@ func TestLaunchRails8(t *testing.T) { _, err = testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/up", appName)) require.NoError(t, err) + + if entries, err := os.ReadDir(fmt.Sprintf("%s/tmp", deploy.WorkDir())); err == nil { + for _, entry := range entries { + var mode = entry.Type() + info, _ := entry.Info() + if info != nil { + mode = info.Mode() + } + fmt.Printf("entry: %s (%s)\n", entry.Name(), mode) + } + } + } func createRandomApp(d *testlib.DeployTestRun) { diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index b33d379558..22559361db 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -100,6 +100,8 @@ type DeployTestRun struct { createAndPushBranch bool + cleanupBeforeExit bool + containerBinds []string containerID string @@ -171,6 +173,10 @@ func CreateAndPushBranch(d *DeployTestRun) { d.createAndPushBranch = true } +func CleanupBeforeExit(d *DeployTestRun) { + d.cleanupBeforeExit = true +} + func WithAppSource(src string) func(*DeployTestRun) { return func(d *DeployTestRun) { d.containerBinds = append(d.containerBinds, fmt.Sprintf("%s:/usr/src/app", src)) @@ -227,6 +233,10 @@ func (d *DeployTestRun) Start(ctx context.Context) error { env = append(env, "DEPLOY_CREATE_AND_PUSH_BRANCH=1") } + if d.cleanupBeforeExit { + env = append(env, "DEPLOYER_CLEANUP_BEFORE_EXIT=1") + } + fmt.Printf("creating container... 
image=%s\n", d.deployerImage) cont, err := d.dockerClient.ContainerCreate(ctx, &container.Config{ Image: d.deployerImage, From 84665ab66a140d01052a50373be24902544d7407 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 16 Oct 2024 08:11:46 -0400 Subject: [PATCH 077/104] detect a Dockerfile first and foremost --- scanner/scanner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scanner/scanner.go b/scanner/scanner.go index 017255fff5..64b8b0f16a 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -112,6 +112,7 @@ type GitHubActionsStruct struct { func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { scanners := []sourceScanner{ + configureDockerfile, configureDjango, configureLaravel, configurePhoenix, @@ -121,7 +122,6 @@ func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { /* frameworks scanners are placed before generic scanners, since they might mix languages or have a Dockerfile that doesn't work with Fly */ - configureDockerfile, configureBridgetown, configureLucky, configureRuby, From ede91b5dbab4dacd6eb812d6bdd20f9e12152521 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 21 Oct 2024 13:25:46 -0400 Subject: [PATCH 078/104] Deployer: skip installing dependencies in some cases (#4019) skip installing dependencies for rails if a Dockerfile is already present --- deploy.rb | 3 +- internal/command/launch/plan/plan.go | 5 +- scanner/rails.go | 218 ++++++------- scanner/scanner.go | 2 +- test/deployer/deployer_test.go | 62 +++- test/fixtures/deploy-rails-7.0/.gitattributes | 7 + test/fixtures/deploy-rails-7.0/.gitignore | 35 +++ test/fixtures/deploy-rails-7.0/.ruby-version | 1 + test/fixtures/deploy-rails-7.0/Gemfile | 72 +++++ test/fixtures/deploy-rails-7.0/Gemfile.lock | 239 ++++++++++++++ test/fixtures/deploy-rails-7.0/README.md | 24 ++ test/fixtures/deploy-rails-7.0/Rakefile | 6 + .../app/assets/config/manifest.js | 4 + .../deploy-rails-7.0/app/assets/images/.keep | 0 .../app/assets/stylesheets/application.css | 15 + .../app/channels/application_cable/channel.rb | 4 + .../channels/application_cable/connection.rb | 4 + .../app/controllers/application_controller.rb | 2 + .../app/controllers/concerns/.keep | 0 .../app/helpers/application_helper.rb | 2 + .../app/javascript/application.js | 3 + .../app/javascript/controllers/application.js | 9 + .../controllers/hello_controller.js | 7 + .../app/javascript/controllers/index.js | 4 + .../app/jobs/application_job.rb | 7 + .../app/mailers/application_mailer.rb | 4 + .../app/models/application_record.rb | 3 + .../app/models/concerns/.keep | 0 .../app/views/layouts/application.html.erb | 16 + .../app/views/layouts/mailer.html.erb | 13 + .../app/views/layouts/mailer.text.erb | 1 + test/fixtures/deploy-rails-7.0/bin/bundle | 114 +++++++ test/fixtures/deploy-rails-7.0/bin/importmap | 4 + test/fixtures/deploy-rails-7.0/bin/rails | 4 + test/fixtures/deploy-rails-7.0/bin/rake | 4 + test/fixtures/deploy-rails-7.0/bin/setup | 33 ++ test/fixtures/deploy-rails-7.0/config.ru | 6 + .../deploy-rails-7.0/config/application.rb | 22 ++ test/fixtures/deploy-rails-7.0/config/boot.rb | 4 + .../deploy-rails-7.0/config/cable.yml | 10 + .../config/credentials.yml.enc | 1 + .../deploy-rails-7.0/config/database.yml | 25 ++ .../deploy-rails-7.0/config/environment.rb | 5 + .../config/environments/development.rb | 70 +++++ .../config/environments/production.rb | 93 ++++++ .../config/environments/test.rb | 60 ++++ .../deploy-rails-7.0/config/importmap.rb | 7 + 
.../config/initializers/assets.rb | 12 + .../initializers/content_security_policy.rb | 25 ++ .../initializers/filter_parameter_logging.rb | 8 + .../config/initializers/inflections.rb | 16 + .../config/initializers/permissions_policy.rb | 11 + .../deploy-rails-7.0/config/locales/en.yml | 33 ++ test/fixtures/deploy-rails-7.0/config/puma.rb | 43 +++ .../deploy-rails-7.0/config/routes.rb | 6 + .../deploy-rails-7.0/config/storage.yml | 34 ++ test/fixtures/deploy-rails-7.0/db/seeds.rb | 7 + .../deploy-rails-7.0/lib/assets/.keep | 0 .../fixtures/deploy-rails-7.0/lib/tasks/.keep | 0 test/fixtures/deploy-rails-7.0/log/.keep | 0 .../fixtures/deploy-rails-7.0/public/404.html | 67 ++++ .../fixtures/deploy-rails-7.0/public/422.html | 67 ++++ .../fixtures/deploy-rails-7.0/public/500.html | 66 ++++ .../public/apple-touch-icon-precomposed.png | 0 .../public/apple-touch-icon.png | 0 .../deploy-rails-7.0/public/favicon.ico | 0 .../deploy-rails-7.0/public/robots.txt | 1 + test/fixtures/deploy-rails-7.0/storage/.keep | 0 .../test/application_system_test_case.rb | 5 + .../application_cable/connection_test.rb | 11 + .../deploy-rails-7.0/test/controllers/.keep | 0 .../test/fixtures/files/.keep | 0 .../deploy-rails-7.0/test/helpers/.keep | 0 .../deploy-rails-7.0/test/integration/.keep | 0 .../deploy-rails-7.0/test/mailers/.keep | 0 .../deploy-rails-7.0/test/models/.keep | 0 .../deploy-rails-7.0/test/system/.keep | 0 .../deploy-rails-7.0/test/test_helper.rb | 13 + test/fixtures/deploy-rails-7.0/vendor/.keep | 0 .../deploy-rails-7.0/vendor/javascript/.keep | 0 test/fixtures/deploy-rails-7.2/.dockerignore | 48 +++ test/fixtures/deploy-rails-7.2/.gitattributes | 9 + .../deploy-rails-7.2/.github/dependabot.yml | 12 + .../deploy-rails-7.2/.github/workflows/ci.yml | 90 ++++++ test/fixtures/deploy-rails-7.2/.gitignore | 35 +++ test/fixtures/deploy-rails-7.2/.rubocop.yml | 8 + test/fixtures/deploy-rails-7.2/.ruby-version | 1 + test/fixtures/deploy-rails-7.2/Dockerfile | 69 ++++ test/fixtures/deploy-rails-7.2/Gemfile | 60 ++++ test/fixtures/deploy-rails-7.2/Gemfile.lock | 296 ++++++++++++++++++ test/fixtures/deploy-rails-7.2/README.md | 24 ++ test/fixtures/deploy-rails-7.2/Rakefile | 6 + .../app/assets/config/manifest.js | 4 + .../deploy-rails-7.2/app/assets/images/.keep | 0 .../app/assets/stylesheets/application.css | 15 + .../app/channels/application_cable/channel.rb | 4 + .../channels/application_cable/connection.rb | 4 + .../app/controllers/application_controller.rb | 4 + .../app/controllers/concerns/.keep | 0 .../app/helpers/application_helper.rb | 2 + .../app/javascript/application.js | 3 + .../app/javascript/controllers/application.js | 9 + .../controllers/hello_controller.js | 7 + .../app/javascript/controllers/index.js | 4 + .../app/jobs/application_job.rb | 7 + .../app/mailers/application_mailer.rb | 4 + .../app/models/application_record.rb | 3 + .../app/models/concerns/.keep | 0 .../app/views/layouts/application.html.erb | 23 ++ .../app/views/layouts/mailer.html.erb | 13 + .../app/views/layouts/mailer.text.erb | 1 + .../app/views/pwa/manifest.json.erb | 22 ++ .../app/views/pwa/service-worker.js | 26 ++ test/fixtures/deploy-rails-7.2/bin/brakeman | 7 + test/fixtures/deploy-rails-7.2/bin/bundle | 114 +++++++ .../deploy-rails-7.2/bin/docker-entrypoint | 13 + test/fixtures/deploy-rails-7.2/bin/importmap | 4 + test/fixtures/deploy-rails-7.2/bin/rails | 4 + test/fixtures/deploy-rails-7.2/bin/rake | 4 + test/fixtures/deploy-rails-7.2/bin/rubocop | 8 + test/fixtures/deploy-rails-7.2/bin/setup | 37 +++ 
test/fixtures/deploy-rails-7.2/config.ru | 6 + .../deploy-rails-7.2/config/application.rb | 27 ++ test/fixtures/deploy-rails-7.2/config/boot.rb | 4 + .../deploy-rails-7.2/config/cable.yml | 10 + .../config/credentials.yml.enc | 1 + .../deploy-rails-7.2/config/database.yml | 32 ++ .../deploy-rails-7.2/config/environment.rb | 5 + .../config/environments/development.rb | 81 +++++ .../config/environments/production.rb | 102 ++++++ .../config/environments/test.rb | 67 ++++ .../deploy-rails-7.2/config/importmap.rb | 7 + .../config/initializers/assets.rb | 12 + .../initializers/content_security_policy.rb | 25 ++ .../initializers/filter_parameter_logging.rb | 8 + .../config/initializers/inflections.rb | 16 + .../config/initializers/permissions_policy.rb | 13 + .../deploy-rails-7.2/config/locales/en.yml | 31 ++ test/fixtures/deploy-rails-7.2/config/puma.rb | 34 ++ .../deploy-rails-7.2/config/routes.rb | 14 + .../deploy-rails-7.2/config/storage.yml | 34 ++ test/fixtures/deploy-rails-7.2/db/seeds.rb | 9 + .../deploy-rails-7.2/lib/assets/.keep | 0 .../fixtures/deploy-rails-7.2/lib/tasks/.keep | 0 test/fixtures/deploy-rails-7.2/log/.keep | 0 .../fixtures/deploy-rails-7.2/public/404.html | 67 ++++ .../public/406-unsupported-browser.html | 66 ++++ .../fixtures/deploy-rails-7.2/public/422.html | 67 ++++ .../fixtures/deploy-rails-7.2/public/500.html | 66 ++++ .../fixtures/deploy-rails-7.2/public/icon.png | Bin 0 -> 5599 bytes .../fixtures/deploy-rails-7.2/public/icon.svg | 3 + .../deploy-rails-7.2/public/robots.txt | 1 + test/fixtures/deploy-rails-7.2/storage/.keep | 0 .../test/application_system_test_case.rb | 5 + .../application_cable/connection_test.rb | 13 + .../deploy-rails-7.2/test/controllers/.keep | 0 .../test/fixtures/files/.keep | 0 .../deploy-rails-7.2/test/helpers/.keep | 0 .../deploy-rails-7.2/test/integration/.keep | 0 .../deploy-rails-7.2/test/mailers/.keep | 0 .../deploy-rails-7.2/test/models/.keep | 0 .../deploy-rails-7.2/test/system/.keep | 0 .../deploy-rails-7.2/test/test_helper.rb | 15 + test/fixtures/deploy-rails-7.2/vendor/.keep | 0 .../deploy-rails-7.2/vendor/javascript/.keep | 0 165 files changed, 3379 insertions(+), 125 deletions(-) create mode 100644 test/fixtures/deploy-rails-7.0/.gitattributes create mode 100644 test/fixtures/deploy-rails-7.0/.gitignore create mode 100644 test/fixtures/deploy-rails-7.0/.ruby-version create mode 100644 test/fixtures/deploy-rails-7.0/Gemfile create mode 100644 test/fixtures/deploy-rails-7.0/Gemfile.lock create mode 100644 test/fixtures/deploy-rails-7.0/README.md create mode 100644 test/fixtures/deploy-rails-7.0/Rakefile create mode 100644 test/fixtures/deploy-rails-7.0/app/assets/config/manifest.js create mode 100644 test/fixtures/deploy-rails-7.0/app/assets/images/.keep create mode 100644 test/fixtures/deploy-rails-7.0/app/assets/stylesheets/application.css create mode 100644 test/fixtures/deploy-rails-7.0/app/channels/application_cable/channel.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/channels/application_cable/connection.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/controllers/application_controller.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/controllers/concerns/.keep create mode 100644 test/fixtures/deploy-rails-7.0/app/helpers/application_helper.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/javascript/application.js create mode 100644 test/fixtures/deploy-rails-7.0/app/javascript/controllers/application.js create mode 100644 
test/fixtures/deploy-rails-7.0/app/javascript/controllers/hello_controller.js create mode 100644 test/fixtures/deploy-rails-7.0/app/javascript/controllers/index.js create mode 100644 test/fixtures/deploy-rails-7.0/app/jobs/application_job.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/mailers/application_mailer.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/models/application_record.rb create mode 100644 test/fixtures/deploy-rails-7.0/app/models/concerns/.keep create mode 100644 test/fixtures/deploy-rails-7.0/app/views/layouts/application.html.erb create mode 100644 test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.html.erb create mode 100644 test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.text.erb create mode 100755 test/fixtures/deploy-rails-7.0/bin/bundle create mode 100755 test/fixtures/deploy-rails-7.0/bin/importmap create mode 100755 test/fixtures/deploy-rails-7.0/bin/rails create mode 100755 test/fixtures/deploy-rails-7.0/bin/rake create mode 100755 test/fixtures/deploy-rails-7.0/bin/setup create mode 100644 test/fixtures/deploy-rails-7.0/config.ru create mode 100644 test/fixtures/deploy-rails-7.0/config/application.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/boot.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/cable.yml create mode 100644 test/fixtures/deploy-rails-7.0/config/credentials.yml.enc create mode 100644 test/fixtures/deploy-rails-7.0/config/database.yml create mode 100644 test/fixtures/deploy-rails-7.0/config/environment.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/environments/development.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/environments/production.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/environments/test.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/importmap.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/initializers/assets.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/initializers/content_security_policy.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/initializers/filter_parameter_logging.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/initializers/inflections.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/initializers/permissions_policy.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/locales/en.yml create mode 100644 test/fixtures/deploy-rails-7.0/config/puma.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/routes.rb create mode 100644 test/fixtures/deploy-rails-7.0/config/storage.yml create mode 100644 test/fixtures/deploy-rails-7.0/db/seeds.rb create mode 100644 test/fixtures/deploy-rails-7.0/lib/assets/.keep create mode 100644 test/fixtures/deploy-rails-7.0/lib/tasks/.keep create mode 100644 test/fixtures/deploy-rails-7.0/log/.keep create mode 100644 test/fixtures/deploy-rails-7.0/public/404.html create mode 100644 test/fixtures/deploy-rails-7.0/public/422.html create mode 100644 test/fixtures/deploy-rails-7.0/public/500.html create mode 100644 test/fixtures/deploy-rails-7.0/public/apple-touch-icon-precomposed.png create mode 100644 test/fixtures/deploy-rails-7.0/public/apple-touch-icon.png create mode 100644 test/fixtures/deploy-rails-7.0/public/favicon.ico create mode 100644 test/fixtures/deploy-rails-7.0/public/robots.txt create mode 100644 test/fixtures/deploy-rails-7.0/storage/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/application_system_test_case.rb create mode 100644 
test/fixtures/deploy-rails-7.0/test/channels/application_cable/connection_test.rb create mode 100644 test/fixtures/deploy-rails-7.0/test/controllers/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/fixtures/files/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/helpers/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/integration/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/mailers/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/models/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/system/.keep create mode 100644 test/fixtures/deploy-rails-7.0/test/test_helper.rb create mode 100644 test/fixtures/deploy-rails-7.0/vendor/.keep create mode 100644 test/fixtures/deploy-rails-7.0/vendor/javascript/.keep create mode 100644 test/fixtures/deploy-rails-7.2/.dockerignore create mode 100644 test/fixtures/deploy-rails-7.2/.gitattributes create mode 100644 test/fixtures/deploy-rails-7.2/.github/dependabot.yml create mode 100644 test/fixtures/deploy-rails-7.2/.github/workflows/ci.yml create mode 100644 test/fixtures/deploy-rails-7.2/.gitignore create mode 100644 test/fixtures/deploy-rails-7.2/.rubocop.yml create mode 100644 test/fixtures/deploy-rails-7.2/.ruby-version create mode 100644 test/fixtures/deploy-rails-7.2/Dockerfile create mode 100644 test/fixtures/deploy-rails-7.2/Gemfile create mode 100644 test/fixtures/deploy-rails-7.2/Gemfile.lock create mode 100644 test/fixtures/deploy-rails-7.2/README.md create mode 100644 test/fixtures/deploy-rails-7.2/Rakefile create mode 100644 test/fixtures/deploy-rails-7.2/app/assets/config/manifest.js create mode 100644 test/fixtures/deploy-rails-7.2/app/assets/images/.keep create mode 100644 test/fixtures/deploy-rails-7.2/app/assets/stylesheets/application.css create mode 100644 test/fixtures/deploy-rails-7.2/app/channels/application_cable/channel.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/channels/application_cable/connection.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/controllers/application_controller.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/controllers/concerns/.keep create mode 100644 test/fixtures/deploy-rails-7.2/app/helpers/application_helper.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/javascript/application.js create mode 100644 test/fixtures/deploy-rails-7.2/app/javascript/controllers/application.js create mode 100644 test/fixtures/deploy-rails-7.2/app/javascript/controllers/hello_controller.js create mode 100644 test/fixtures/deploy-rails-7.2/app/javascript/controllers/index.js create mode 100644 test/fixtures/deploy-rails-7.2/app/jobs/application_job.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/mailers/application_mailer.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/models/application_record.rb create mode 100644 test/fixtures/deploy-rails-7.2/app/models/concerns/.keep create mode 100644 test/fixtures/deploy-rails-7.2/app/views/layouts/application.html.erb create mode 100644 test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.html.erb create mode 100644 test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.text.erb create mode 100644 test/fixtures/deploy-rails-7.2/app/views/pwa/manifest.json.erb create mode 100644 test/fixtures/deploy-rails-7.2/app/views/pwa/service-worker.js create mode 100755 test/fixtures/deploy-rails-7.2/bin/brakeman create mode 100755 test/fixtures/deploy-rails-7.2/bin/bundle create mode 100755 test/fixtures/deploy-rails-7.2/bin/docker-entrypoint create mode 100755 
test/fixtures/deploy-rails-7.2/bin/importmap create mode 100755 test/fixtures/deploy-rails-7.2/bin/rails create mode 100755 test/fixtures/deploy-rails-7.2/bin/rake create mode 100755 test/fixtures/deploy-rails-7.2/bin/rubocop create mode 100755 test/fixtures/deploy-rails-7.2/bin/setup create mode 100644 test/fixtures/deploy-rails-7.2/config.ru create mode 100644 test/fixtures/deploy-rails-7.2/config/application.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/boot.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/cable.yml create mode 100644 test/fixtures/deploy-rails-7.2/config/credentials.yml.enc create mode 100644 test/fixtures/deploy-rails-7.2/config/database.yml create mode 100644 test/fixtures/deploy-rails-7.2/config/environment.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/environments/development.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/environments/production.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/environments/test.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/importmap.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/initializers/assets.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/initializers/content_security_policy.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/initializers/filter_parameter_logging.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/initializers/inflections.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/initializers/permissions_policy.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/locales/en.yml create mode 100644 test/fixtures/deploy-rails-7.2/config/puma.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/routes.rb create mode 100644 test/fixtures/deploy-rails-7.2/config/storage.yml create mode 100644 test/fixtures/deploy-rails-7.2/db/seeds.rb create mode 100644 test/fixtures/deploy-rails-7.2/lib/assets/.keep create mode 100644 test/fixtures/deploy-rails-7.2/lib/tasks/.keep create mode 100644 test/fixtures/deploy-rails-7.2/log/.keep create mode 100644 test/fixtures/deploy-rails-7.2/public/404.html create mode 100644 test/fixtures/deploy-rails-7.2/public/406-unsupported-browser.html create mode 100644 test/fixtures/deploy-rails-7.2/public/422.html create mode 100644 test/fixtures/deploy-rails-7.2/public/500.html create mode 100644 test/fixtures/deploy-rails-7.2/public/icon.png create mode 100644 test/fixtures/deploy-rails-7.2/public/icon.svg create mode 100644 test/fixtures/deploy-rails-7.2/public/robots.txt create mode 100644 test/fixtures/deploy-rails-7.2/storage/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/application_system_test_case.rb create mode 100644 test/fixtures/deploy-rails-7.2/test/channels/application_cable/connection_test.rb create mode 100644 test/fixtures/deploy-rails-7.2/test/controllers/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/fixtures/files/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/helpers/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/integration/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/mailers/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/models/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/system/.keep create mode 100644 test/fixtures/deploy-rails-7.2/test/test_helper.rb create mode 100644 test/fixtures/deploy-rails-7.2/vendor/.keep create mode 100644 test/fixtures/deploy-rails-7.2/vendor/javascript/.keep diff --git a/deploy.rb b/deploy.rb index 
4be91478b7..907389a33a 100755 --- a/deploy.rb +++ b/deploy.rb @@ -157,7 +157,8 @@ RUNTIME_LANGUAGE = manifest.dig("plan", "runtime", "language") RUNTIME_VERSION = manifest.dig("plan", "runtime", "version") - DO_INSTALL_DEPS = REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) + DEPS_REQUIRED = !manifest.dig("plan", "runtime", "no_install_required") + DO_INSTALL_DEPS = DEPS_REQUIRED && REQUIRES_DEPENDENCIES.include?(RUNTIME_LANGUAGE) steps.push({id: Step::INSTALL_DEPENDENCIES, description: "Install required dependencies", async: true}) if DO_INSTALL_DEPS diff --git a/internal/command/launch/plan/plan.go b/internal/command/launch/plan/plan.go index 5e3a032862..1030104d1a 100644 --- a/internal/command/launch/plan/plan.go +++ b/internal/command/launch/plan/plan.go @@ -42,8 +42,9 @@ type LaunchPlan struct { } type RuntimeStruct struct { - Language string `json:"language"` - Version string `json:"version"` + Language string `json:"language"` + Version string `json:"version"` + NoInstallRequired bool `json:"no_install_required"` } // Guest returns the guest described by the *raw* guest fields in a Plan. diff --git a/scanner/rails.go b/scanner/rails.go index 4655a87030..584ceb1ece 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -34,30 +34,7 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error return nil, nil } - // find absolute pat to bundle, ruby executables - // see: https://tip.golang.org/doc/go1.19#os-exec-path var err error - bundle, err = exec.LookPath("bundle") - if err != nil { - if errors.Is(err, exec.ErrDot) { - bundle, err = filepath.Abs(bundle) - } - - if err != nil { - return nil, errors.Wrap(err, "failure finding bundle executable") - } - } - - ruby, err = exec.LookPath("ruby") - if err != nil { - if errors.Is(err, exec.ErrDot) { - ruby, err = filepath.Abs(ruby) - } - - if err != nil { - return nil, errors.Wrap(err, "failure finding ruby executable") - } - } s := &SourceInfo{ Family: "Rails", @@ -170,6 +147,7 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error s.Port = port } } + s.Runtime.NoInstallRequired = true } // master.key comes with Rails apps from v5.2 onwards, but may not be present @@ -239,14 +217,32 @@ Once ready: run 'fly deploy' to deploy your Rails app. 
` } - // fetch healthcheck route in a separate thread - go func() { - ruby, err := exec.LookPath("ruby") + // find absolute pat to bundle, ruby executables + // see: https://tip.golang.org/doc/go1.19#os-exec-path + bundle, err = exec.LookPath("bundle") + if err != nil { + if errors.Is(err, exec.ErrDot) { + bundle, err = filepath.Abs(bundle) + } + if err != nil { - healthcheck_channel <- "" - return + return nil, errors.Wrap(err, "failure finding bundle executable") } + } + ruby, err = exec.LookPath("ruby") + if err != nil { + if errors.Is(err, exec.ErrDot) { + ruby, err = filepath.Abs(ruby) + } + + if err != nil { + return nil, errors.Wrap(err, "failure finding ruby executable") + } + } + + // fetch healthcheck route in a separate thread + go func() { out, err := exec.Command(ruby, binrails, "runner", "puts Rails.application.routes.url_helpers.rails_health_check_path").Output() @@ -271,65 +267,69 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f // install dockerfile-rails gem, if not already included and the gem directory is writable // if an error occurrs, store it for later in pendingError - generatorInstalled := false + + var err error var pendingError error - gemfile, err := os.ReadFile("Gemfile") - if err != nil { - return errors.Wrap(err, "Failed to read Gemfile") - } else if !strings.Contains(string(gemfile), "dockerfile-rails") { - // check for writable gem installation directory - writable := false - out, err := exec.Command("gem", "environment").Output() - if err == nil { - regexp := regexp.MustCompile(`INSTALLATION DIRECTORY: (.*)\n`) - for _, match := range regexp.FindAllStringSubmatch(string(out), -1) { - // Testing to see if a directory is writable is OS dependent, so - // we use a brute force method: attempt it and see if it works. - file, err := os.CreateTemp(match[1], ".flyctl.probe") - if err == nil { - writable = true - file.Close() - defer os.Remove(file.Name()) + generatorInstalled := false + if _, err := os.Stat("Dockerfile"); err != nil { + gemfile, err := os.ReadFile("Gemfile") + if err != nil { + return errors.Wrap(err, "Failed to read Gemfile") + } else if !strings.Contains(string(gemfile), "dockerfile-rails") { + // check for writable gem installation directory + writable := false + out, err := exec.Command("gem", "environment").Output() + if err == nil { + regexp := regexp.MustCompile(`INSTALLATION DIRECTORY: (.*)\n`) + for _, match := range regexp.FindAllStringSubmatch(string(out), -1) { + // Testing to see if a directory is writable is OS dependent, so + // we use a brute force method: attempt it and see if it works. 
+ file, err := os.CreateTemp(match[1], ".flyctl.probe") + if err == nil { + writable = true + file.Close() + defer os.Remove(file.Name()) + } } } - } - // install dockerfile-rails gem if the gem installation directory is writable - if writable { - cmd := exec.Command(bundle, "add", "dockerfile-rails", - "--optimistic", "--group", "development", "--skip-install") - cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - pendingError = cmd.Run() - if pendingError != nil { - pendingError = errors.Wrap(pendingError, "Failed to add dockerfile-rails gem") - } else { - generatorInstalled = true + // install dockerfile-rails gem if the gem installation directory is writable + if writable { + cmd := exec.Command(bundle, "add", "dockerfile-rails", + "--optimistic", "--group", "development", "--skip-install") + cmd.Stdin = nil + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + pendingError = cmd.Run() + if pendingError != nil { + pendingError = errors.Wrap(pendingError, "Failed to add dockerfile-rails gem") + } else { + generatorInstalled = true + } } + } else { + // proceed using the already installed gem + generatorInstalled = true } - } else { - // proceed using the already installed gem - generatorInstalled = true - } - cmd := exec.Command(bundle, "install", "--quiet") - cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr + cmd := exec.Command(bundle, "install", "--quiet") + cmd.Stdin = nil + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - return errors.Wrap(pendingError, "Failed to install bundle, exiting") - } + err = cmd.Run() + if err != nil { + return errors.Wrap(pendingError, "Failed to install bundle, exiting") + } - // ensure Gemfile.lock includes the x86_64-linux platform - if out, err := exec.Command(bundle, "platform").Output(); err == nil { - if !strings.Contains(string(out), "x86_64-linux") { - cmd := exec.Command(bundle, "lock", "--add-platform", "x86_64-linux") - if err := cmd.Run(); err != nil { - return errors.Wrap(err, "Failed to add x86_64-linux platform, exiting") + // ensure Gemfile.lock includes the x86_64-linux platform + if out, err := exec.Command(bundle, "platform").Output(); err == nil { + if !strings.Contains(string(out), "x86_64-linux") { + cmd := exec.Command(bundle, "lock", "--add-platform", "x86_64-linux") + if err := cmd.Run(); err != nil { + return errors.Wrap(err, "Failed to add x86_64-linux platform, exiting") + } } } } @@ -353,42 +353,42 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f } } - // base generate command - args := []string{binrails, "generate", "dockerfile", - "--label=fly_launch_runtime:rails"} + // run command if the generator is available + if generatorInstalled { + // base generate command + args := []string{binrails, "generate", "dockerfile", + "--label=fly_launch_runtime:rails"} - // skip prompt to replace files if Dockerfile already exists - _, err = os.Stat("Dockerfile") - if !errors.Is(err, fs.ErrNotExist) { - args = append(args, "--skip") + // skip prompt to replace files if Dockerfile already exists + _, err = os.Stat("Dockerfile") + if !errors.Is(err, fs.ErrNotExist) { + args = append(args, "--skip") - if !generatorInstalled { - return errors.Wrap(pendingError, "No Dockerfile found") + if !generatorInstalled { + return errors.Wrap(pendingError, "No Dockerfile found") + } } - } - // add postgres - if plan.Postgres.Provider() != nil { - args = append(args, "--postgresql", "--no-prepare") - } + // add postgres + if 
plan.Postgres.Provider() != nil { + args = append(args, "--postgresql", "--no-prepare") + } - // add redis - if plan.Redis.Provider() != nil { - args = append(args, "--redis") - } + // add redis + if plan.Redis.Provider() != nil { + args = append(args, "--redis") + } - // add object storage - if plan.ObjectStorage.Provider() != nil { - args = append(args, "--tigris") - } + // add object storage + if plan.ObjectStorage.Provider() != nil { + args = append(args, "--tigris") + } - // add additional flags from launch command - if len(flags) > 0 { - args = append(args, flags...) - } + // add additional flags from launch command + if len(flags) > 0 { + args = append(args, flags...) + } - // run command if the generator is available - if generatorInstalled { fmt.Printf("Running: %s\n", strings.Join(args, " ")) cmd := exec.Command(ruby, args...) cmd.Stdin = os.Stdin @@ -419,7 +419,7 @@ The following comand can be used to update your Dockerfile: // read dockerfile dockerfile, err := os.ReadFile("Dockerfile") if err == nil { - if pendingError != nil { + if generatorInstalled && pendingError != nil { // generator may have failed, but Dockerfile was created - warn user fmt.Println("Error running dockerfile generator:", pendingError) } @@ -444,6 +444,10 @@ The following comand can be used to update your Dockerfile: } } + if srcInfo.DatabaseDesired == DatabaseKindSqlite { + + } + // add HealthCheck (if found) srcInfo.HttpCheckPath = <-healthcheck_channel if srcInfo.HttpCheckPath != "" { diff --git a/scanner/scanner.go b/scanner/scanner.go index 64b8b0f16a..017255fff5 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -112,7 +112,6 @@ type GitHubActionsStruct struct { func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { scanners := []sourceScanner{ - configureDockerfile, configureDjango, configureLaravel, configurePhoenix, @@ -122,6 +121,7 @@ func Scan(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { /* frameworks scanners are placed before generic scanners, since they might mix languages or have a Dockerfile that doesn't work with Fly */ + configureDockerfile, configureBridgetown, configureLucky, configureRuby, diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 998439c9a6..4efaf01fa2 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -6,7 +6,6 @@ package deployer import ( "context" "fmt" - "os" "testing" "github.com/stretchr/testify/require" @@ -158,9 +157,9 @@ func TestLaunchGoFromRepo(t *testing.T) { require.Contains(t, string(body), "I'm running in the yyz region") } -func TestLaunchRails8(t *testing.T) { +func TestLaunchRails70(t *testing.T) { deploy := testDeployer(t, - withFixtureApp("deploy-rails-8"), + withFixtureApp("deploy-rails-7.0"), createRandomApp, testlib.WithoutCustomize, testlib.WithouExtensions, @@ -173,24 +172,59 @@ func TestLaunchRails8(t *testing.T) { require.NoError(t, err) require.NotNil(t, manifest) - require.Equal(t, manifest.Plan.Runtime.Language, "ruby") + require.Equal(t, "ruby", manifest.Plan.Runtime.Language) appName := deploy.Extra["appName"].(string) _, err = testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/up", appName)) require.NoError(t, err) +} - if entries, err := os.ReadDir(fmt.Sprintf("%s/tmp", deploy.WorkDir())); err == nil { - for _, entry := range entries { - var mode = entry.Type() - info, _ := entry.Info() - if info != nil { - mode = info.Mode() - } - fmt.Printf("entry: %s (%s)\n", entry.Name(), mode) - } - } +func TestLaunchRails72(t *testing.T) 
{ + deploy := testDeployer(t, + withFixtureApp("deploy-rails-7.2"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + require.Equal(t, "ruby", manifest.Plan.Runtime.Language) + + appName := deploy.Extra["appName"].(string) + + _, err = testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/up", appName)) + require.NoError(t, err) +} + +func TestLaunchRails8(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-rails-8"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, "ruby", manifest.Plan.Runtime.Language) + require.Equal(t, "Rails", manifest.Plan.ScannerFamily) + + appName := deploy.Extra["appName"].(string) + + _, err = testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/up", appName)) + require.NoError(t, err) } func createRandomApp(d *testlib.DeployTestRun) { diff --git a/test/fixtures/deploy-rails-7.0/.gitattributes b/test/fixtures/deploy-rails-7.0/.gitattributes new file mode 100644 index 0000000000..31eeee0b6a --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/.gitattributes @@ -0,0 +1,7 @@ +# See https://git-scm.com/docs/gitattributes for more about git attribute files. + +# Mark the database schema as having been generated. +db/schema.rb linguist-generated + +# Mark any vendored files as having been vendored. +vendor/* linguist-vendored diff --git a/test/fixtures/deploy-rails-7.0/.gitignore b/test/fixtures/deploy-rails-7.0/.gitignore new file mode 100644 index 0000000000..886f714b42 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/.gitignore @@ -0,0 +1,35 @@ +# See https://help.github.com/articles/ignoring-files for more about ignoring files. +# +# If you find yourself ignoring temporary files generated by your text editor +# or operating system, you probably want to add a global ignore instead: +# git config --global core.excludesfile '~/.gitignore_global' + +# Ignore bundler config. +/.bundle + +# Ignore the default SQLite database. +/db/*.sqlite3 +/db/*.sqlite3-* + +# Ignore all logfiles and tempfiles. +/log/* +/tmp/* +!/log/.keep +!/tmp/.keep + +# Ignore pidfiles, but keep the directory. +/tmp/pids/* +!/tmp/pids/ +!/tmp/pids/.keep + +# Ignore uploaded files in development. +/storage/* +!/storage/.keep +/tmp/storage/* +!/tmp/storage/ +!/tmp/storage/.keep + +/public/assets + +# Ignore master key for decrypting credentials and more. 
+/config/master.key diff --git a/test/fixtures/deploy-rails-7.0/.ruby-version b/test/fixtures/deploy-rails-7.0/.ruby-version new file mode 100644 index 0000000000..71e447d5b6 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/.ruby-version @@ -0,0 +1 @@ +ruby-3.1.4 diff --git a/test/fixtures/deploy-rails-7.0/Gemfile b/test/fixtures/deploy-rails-7.0/Gemfile new file mode 100644 index 0000000000..767574b311 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/Gemfile @@ -0,0 +1,72 @@ +source "https://rubygems.org" +git_source(:github) { |repo| "https://github.com/#{repo}.git" } + +ruby "3.1.4" + +# Bundle edge Rails instead: gem "rails", github: "rails/rails", branch: "main" +gem "rails", "~> 7.0.8", ">= 7.0.8.5" + +# The original asset pipeline for Rails [https://github.com/rails/sprockets-rails] +gem "sprockets-rails" + +# Use sqlite3 as the database for Active Record +gem "sqlite3", "~> 1.4" + +# Use the Puma web server [https://github.com/puma/puma] +gem "puma", "~> 5.0" + +# Use JavaScript with ESM import maps [https://github.com/rails/importmap-rails] +gem "importmap-rails" + +# Hotwire's SPA-like page accelerator [https://turbo.hotwired.dev] +gem "turbo-rails" + +# Hotwire's modest JavaScript framework [https://stimulus.hotwired.dev] +gem "stimulus-rails" + +# Build JSON APIs with ease [https://github.com/rails/jbuilder] +gem "jbuilder" + +# Use Redis adapter to run Action Cable in production +# gem "redis", "~> 4.0" + +# Use Kredis to get higher-level data types in Redis [https://github.com/rails/kredis] +# gem "kredis" + +# Use Active Model has_secure_password [https://guides.rubyonrails.org/active_model_basics.html#securepassword] +# gem "bcrypt", "~> 3.1.7" + +# Windows does not include zoneinfo files, so bundle the tzinfo-data gem +gem "tzinfo-data", platforms: %i[ mingw mswin x64_mingw jruby ] + +# Reduces boot times through caching; required in config/boot.rb +gem "bootsnap", require: false + +# Use Sass to process CSS +# gem "sassc-rails" + +# Use Active Storage variants [https://guides.rubyonrails.org/active_storage_overview.html#transforming-images] +# gem "image_processing", "~> 1.2" + +group :development, :test do + # See https://guides.rubyonrails.org/debugging_rails_applications.html#debugging-with-the-debug-gem + gem "debug", platforms: %i[ mri mingw x64_mingw ] +end + +group :development do + # Use console on exceptions pages [https://github.com/rails/web-console] + gem "web-console" + + # Add speed badges [https://github.com/MiniProfiler/rack-mini-profiler] + # gem "rack-mini-profiler" + + # Speed up commands on slow machines / big apps [https://github.com/rails/spring] + # gem "spring" +end + +group :test do + # Use system testing [https://guides.rubyonrails.org/testing.html#system-testing] + gem "capybara" + gem "selenium-webdriver" + +end diff --git a/test/fixtures/deploy-rails-7.0/Gemfile.lock b/test/fixtures/deploy-rails-7.0/Gemfile.lock new file mode 100644 index 0000000000..71fc13efa2 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/Gemfile.lock @@ -0,0 +1,239 @@ +GEM + remote: https://rubygems.org/ + specs: + actioncable (7.0.8.5) + actionpack (= 7.0.8.5) + activesupport (= 7.0.8.5) + nio4r (~> 2.0) + websocket-driver (>= 0.6.1) + actionmailbox (7.0.8.5) + actionpack (= 7.0.8.5) + activejob (= 7.0.8.5) + activerecord (= 7.0.8.5) + activestorage (= 7.0.8.5) + activesupport (= 7.0.8.5) + mail (>= 2.7.1) + net-imap + net-pop + net-smtp + actionmailer (7.0.8.5) + actionpack (= 7.0.8.5) + actionview (= 7.0.8.5) + activejob (= 7.0.8.5) + activesupport (= 7.0.8.5) + 
mail (~> 2.5, >= 2.5.4) + net-imap + net-pop + net-smtp + rails-dom-testing (~> 2.0) + actionpack (7.0.8.5) + actionview (= 7.0.8.5) + activesupport (= 7.0.8.5) + rack (~> 2.0, >= 2.2.4) + rack-test (>= 0.6.3) + rails-dom-testing (~> 2.0) + rails-html-sanitizer (~> 1.0, >= 1.2.0) + actiontext (7.0.8.5) + actionpack (= 7.0.8.5) + activerecord (= 7.0.8.5) + activestorage (= 7.0.8.5) + activesupport (= 7.0.8.5) + globalid (>= 0.6.0) + nokogiri (>= 1.8.5) + actionview (7.0.8.5) + activesupport (= 7.0.8.5) + builder (~> 3.1) + erubi (~> 1.4) + rails-dom-testing (~> 2.0) + rails-html-sanitizer (~> 1.1, >= 1.2.0) + activejob (7.0.8.5) + activesupport (= 7.0.8.5) + globalid (>= 0.3.6) + activemodel (7.0.8.5) + activesupport (= 7.0.8.5) + activerecord (7.0.8.5) + activemodel (= 7.0.8.5) + activesupport (= 7.0.8.5) + activestorage (7.0.8.5) + actionpack (= 7.0.8.5) + activejob (= 7.0.8.5) + activerecord (= 7.0.8.5) + activesupport (= 7.0.8.5) + marcel (~> 1.0) + mini_mime (>= 1.1.0) + activesupport (7.0.8.5) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + base64 (0.2.0) + bindex (0.8.1) + bootsnap (1.18.4) + msgpack (~> 1.2) + builder (3.3.0) + capybara (3.40.0) + addressable + matrix + mini_mime (>= 0.1.3) + nokogiri (~> 1.11) + rack (>= 1.6.0) + rack-test (>= 0.6.3) + regexp_parser (>= 1.5, < 3.0) + xpath (~> 3.2) + concurrent-ruby (1.3.4) + crass (1.0.6) + date (3.3.4) + debug (1.9.2) + irb (~> 1.10) + reline (>= 0.3.8) + erubi (1.13.0) + globalid (1.2.1) + activesupport (>= 6.1) + i18n (1.14.6) + concurrent-ruby (~> 1.0) + importmap-rails (2.0.3) + actionpack (>= 6.0.0) + activesupport (>= 6.0.0) + railties (>= 6.0.0) + io-console (0.7.2) + irb (1.14.1) + rdoc (>= 4.0.0) + reline (>= 0.4.2) + jbuilder (2.13.0) + actionview (>= 5.0.0) + activesupport (>= 5.0.0) + logger (1.6.1) + loofah (2.22.0) + crass (~> 1.0.2) + nokogiri (>= 1.12.0) + mail (2.8.1) + mini_mime (>= 0.1.1) + net-imap + net-pop + net-smtp + marcel (1.0.4) + matrix (0.4.2) + method_source (1.1.0) + mini_mime (1.1.5) + minitest (5.25.1) + msgpack (1.7.3) + net-imap (0.4.17) + date + net-protocol + net-pop (0.1.2) + net-protocol + net-protocol (0.2.2) + timeout + net-smtp (0.5.0) + net-protocol + nio4r (2.7.3) + nokogiri (1.16.7-x86_64-linux) + racc (~> 1.4) + psych (5.1.2) + stringio + public_suffix (6.0.1) + puma (5.6.9) + nio4r (~> 2.0) + racc (1.8.1) + rack (2.2.10) + rack-test (2.1.0) + rack (>= 1.3) + rails (7.0.8.5) + actioncable (= 7.0.8.5) + actionmailbox (= 7.0.8.5) + actionmailer (= 7.0.8.5) + actionpack (= 7.0.8.5) + actiontext (= 7.0.8.5) + actionview (= 7.0.8.5) + activejob (= 7.0.8.5) + activemodel (= 7.0.8.5) + activerecord (= 7.0.8.5) + activestorage (= 7.0.8.5) + activesupport (= 7.0.8.5) + bundler (>= 1.15.0) + railties (= 7.0.8.5) + rails-dom-testing (2.2.0) + activesupport (>= 5.0.0) + minitest + nokogiri (>= 1.6) + rails-html-sanitizer (1.6.0) + loofah (~> 2.21) + nokogiri (~> 1.14) + railties (7.0.8.5) + actionpack (= 7.0.8.5) + activesupport (= 7.0.8.5) + method_source + rake (>= 12.2) + thor (~> 1.0) + zeitwerk (~> 2.5) + rake (13.2.1) + rdoc (6.7.0) + psych (>= 4.0.0) + regexp_parser (2.9.2) + reline (0.5.10) + io-console (~> 0.5) + rexml (3.3.8) + rubyzip (2.3.2) + selenium-webdriver (4.25.0) + base64 (~> 0.2) + logger (~> 1.4) + rexml (~> 3.2, >= 3.2.5) + rubyzip (>= 1.2.2, < 3.0) + websocket (~> 1.0) + sprockets (4.2.1) + concurrent-ruby (~> 1.0) + rack (>= 2.2.4, < 4) + sprockets-rails (3.5.2) + 
actionpack (>= 6.1) + activesupport (>= 6.1) + sprockets (>= 3.0.0) + sqlite3 (1.7.3-x86_64-linux) + stimulus-rails (1.3.4) + railties (>= 6.0.0) + stringio (3.1.1) + thor (1.3.2) + timeout (0.4.1) + turbo-rails (2.0.11) + actionpack (>= 6.0.0) + railties (>= 6.0.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + web-console (4.2.1) + actionview (>= 6.0.0) + activemodel (>= 6.0.0) + bindex (>= 0.4.0) + railties (>= 6.0.0) + websocket (1.2.11) + websocket-driver (0.7.6) + websocket-extensions (>= 0.1.0) + websocket-extensions (0.1.5) + xpath (3.2.0) + nokogiri (~> 1.8) + zeitwerk (2.6.18) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + bootsnap + capybara + debug + importmap-rails + jbuilder + puma (~> 5.0) + rails (~> 7.0.8, >= 7.0.8.5) + selenium-webdriver + sprockets-rails + sqlite3 (~> 1.4) + stimulus-rails + turbo-rails + tzinfo-data + web-console + +RUBY VERSION + ruby 3.1.4p223 + +BUNDLED WITH + 2.3.26 diff --git a/test/fixtures/deploy-rails-7.0/README.md b/test/fixtures/deploy-rails-7.0/README.md new file mode 100644 index 0000000000..7db80e4ca1 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/README.md @@ -0,0 +1,24 @@ +# README + +This README would normally document whatever steps are necessary to get the +application up and running. + +Things you may want to cover: + +* Ruby version + +* System dependencies + +* Configuration + +* Database creation + +* Database initialization + +* How to run the test suite + +* Services (job queues, cache servers, search engines, etc.) + +* Deployment instructions + +* ... diff --git a/test/fixtures/deploy-rails-7.0/Rakefile b/test/fixtures/deploy-rails-7.0/Rakefile new file mode 100644 index 0000000000..9a5ea7383a --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/Rakefile @@ -0,0 +1,6 @@ +# Add your own tasks in files placed in lib/tasks ending in .rake, +# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake. + +require_relative "config/application" + +Rails.application.load_tasks diff --git a/test/fixtures/deploy-rails-7.0/app/assets/config/manifest.js b/test/fixtures/deploy-rails-7.0/app/assets/config/manifest.js new file mode 100644 index 0000000000..ddd546a0be --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/assets/config/manifest.js @@ -0,0 +1,4 @@ +//= link_tree ../images +//= link_directory ../stylesheets .css +//= link_tree ../../javascript .js +//= link_tree ../../../vendor/javascript .js diff --git a/test/fixtures/deploy-rails-7.0/app/assets/images/.keep b/test/fixtures/deploy-rails-7.0/app/assets/images/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/app/assets/stylesheets/application.css b/test/fixtures/deploy-rails-7.0/app/assets/stylesheets/application.css new file mode 100644 index 0000000000..288b9ab718 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/assets/stylesheets/application.css @@ -0,0 +1,15 @@ +/* + * This is a manifest file that'll be compiled into application.css, which will include all the files + * listed below. + * + * Any CSS (and SCSS, if configured) file within this directory, lib/assets/stylesheets, or any plugin's + * vendor/assets/stylesheets directory can be referenced here using a relative path. + * + * You're free to add application-wide styles to this file and they'll appear at the bottom of the + * compiled file so the styles you add here take precedence over styles defined in any other CSS + * files in this directory. Styles in this file should be added after the last require_* statement. 
+ * It is generally better to create a new file per style scope. + * + *= require_tree . + *= require_self + */ diff --git a/test/fixtures/deploy-rails-7.0/app/channels/application_cable/channel.rb b/test/fixtures/deploy-rails-7.0/app/channels/application_cable/channel.rb new file mode 100644 index 0000000000..d672697283 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/channels/application_cable/channel.rb @@ -0,0 +1,4 @@ +module ApplicationCable + class Channel < ActionCable::Channel::Base + end +end diff --git a/test/fixtures/deploy-rails-7.0/app/channels/application_cable/connection.rb b/test/fixtures/deploy-rails-7.0/app/channels/application_cable/connection.rb new file mode 100644 index 0000000000..0ff5442f47 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/channels/application_cable/connection.rb @@ -0,0 +1,4 @@ +module ApplicationCable + class Connection < ActionCable::Connection::Base + end +end diff --git a/test/fixtures/deploy-rails-7.0/app/controllers/application_controller.rb b/test/fixtures/deploy-rails-7.0/app/controllers/application_controller.rb new file mode 100644 index 0000000000..09705d12ab --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/controllers/application_controller.rb @@ -0,0 +1,2 @@ +class ApplicationController < ActionController::Base +end diff --git a/test/fixtures/deploy-rails-7.0/app/controllers/concerns/.keep b/test/fixtures/deploy-rails-7.0/app/controllers/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/app/helpers/application_helper.rb b/test/fixtures/deploy-rails-7.0/app/helpers/application_helper.rb new file mode 100644 index 0000000000..de6be7945c --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/helpers/application_helper.rb @@ -0,0 +1,2 @@ +module ApplicationHelper +end diff --git a/test/fixtures/deploy-rails-7.0/app/javascript/application.js b/test/fixtures/deploy-rails-7.0/app/javascript/application.js new file mode 100644 index 0000000000..0d7b49404c --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/javascript/application.js @@ -0,0 +1,3 @@ +// Configure your import map in config/importmap.rb. Read more: https://github.com/rails/importmap-rails +import "@hotwired/turbo-rails" +import "controllers" diff --git a/test/fixtures/deploy-rails-7.0/app/javascript/controllers/application.js b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/application.js new file mode 100644 index 0000000000..1213e85c7a --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/application.js @@ -0,0 +1,9 @@ +import { Application } from "@hotwired/stimulus" + +const application = Application.start() + +// Configure Stimulus development experience +application.debug = false +window.Stimulus = application + +export { application } diff --git a/test/fixtures/deploy-rails-7.0/app/javascript/controllers/hello_controller.js b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/hello_controller.js new file mode 100644 index 0000000000..5975c0789d --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/hello_controller.js @@ -0,0 +1,7 @@ +import { Controller } from "@hotwired/stimulus" + +export default class extends Controller { + connect() { + this.element.textContent = "Hello World!" 
+ } +} diff --git a/test/fixtures/deploy-rails-7.0/app/javascript/controllers/index.js b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/index.js new file mode 100644 index 0000000000..1156bf8362 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/javascript/controllers/index.js @@ -0,0 +1,4 @@ +// Import and register all your controllers from the importmap via controllers/**/*_controller +import { application } from "controllers/application" +import { eagerLoadControllersFrom } from "@hotwired/stimulus-loading" +eagerLoadControllersFrom("controllers", application) diff --git a/test/fixtures/deploy-rails-7.0/app/jobs/application_job.rb b/test/fixtures/deploy-rails-7.0/app/jobs/application_job.rb new file mode 100644 index 0000000000..d394c3d106 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/jobs/application_job.rb @@ -0,0 +1,7 @@ +class ApplicationJob < ActiveJob::Base + # Automatically retry jobs that encountered a deadlock + # retry_on ActiveRecord::Deadlocked + + # Most jobs are safe to ignore if the underlying records are no longer available + # discard_on ActiveJob::DeserializationError +end diff --git a/test/fixtures/deploy-rails-7.0/app/mailers/application_mailer.rb b/test/fixtures/deploy-rails-7.0/app/mailers/application_mailer.rb new file mode 100644 index 0000000000..3c34c8148f --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/mailers/application_mailer.rb @@ -0,0 +1,4 @@ +class ApplicationMailer < ActionMailer::Base + default from: "from@example.com" + layout "mailer" +end diff --git a/test/fixtures/deploy-rails-7.0/app/models/application_record.rb b/test/fixtures/deploy-rails-7.0/app/models/application_record.rb new file mode 100644 index 0000000000..b63caeb8a5 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/models/application_record.rb @@ -0,0 +1,3 @@ +class ApplicationRecord < ActiveRecord::Base + primary_abstract_class +end diff --git a/test/fixtures/deploy-rails-7.0/app/models/concerns/.keep b/test/fixtures/deploy-rails-7.0/app/models/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/app/views/layouts/application.html.erb b/test/fixtures/deploy-rails-7.0/app/views/layouts/application.html.erb new file mode 100644 index 0000000000..0ae766aeda --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/views/layouts/application.html.erb @@ -0,0 +1,16 @@ + + + + DeployRails70 + + <%= csrf_meta_tags %> + <%= csp_meta_tag %> + + <%= stylesheet_link_tag "application", "data-turbo-track": "reload" %> + <%= javascript_importmap_tags %> + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.html.erb b/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.html.erb new file mode 100644 index 0000000000..cbd34d2e9d --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.html.erb @@ -0,0 +1,13 @@ + + + + + + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.text.erb b/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.text.erb new file mode 100644 index 0000000000..37f0bddbd7 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/app/views/layouts/mailer.text.erb @@ -0,0 +1 @@ +<%= yield %> diff --git a/test/fixtures/deploy-rails-7.0/bin/bundle b/test/fixtures/deploy-rails-7.0/bin/bundle new file mode 100755 index 0000000000..981e650b68 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/bin/bundle @@ -0,0 +1,114 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated 
by Bundler. +# +# The application 'bundle' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require "rubygems" + +m = Module.new do + module_function + + def invoked_as_script? + File.expand_path($0) == File.expand_path(__FILE__) + end + + def env_var_version + ENV["BUNDLER_VERSION"] + end + + def cli_arg_version + return unless invoked_as_script? # don't want to hijack other binstubs + return unless "update".start_with?(ARGV.first || " ") # must be running `bundle update` + bundler_version = nil + update_index = nil + ARGV.each_with_index do |a, i| + if update_index && update_index.succ == i && a =~ Gem::Version::ANCHORED_VERSION_PATTERN + bundler_version = a + end + next unless a =~ /\A--bundler(?:[= ](#{Gem::Version::VERSION_PATTERN}))?\z/ + bundler_version = $1 + update_index = i + end + bundler_version + end + + def gemfile + gemfile = ENV["BUNDLE_GEMFILE"] + return gemfile if gemfile && !gemfile.empty? + + File.expand_path("../Gemfile", __dir__) + end + + def lockfile + lockfile = + case File.basename(gemfile) + when "gems.rb" then gemfile.sub(/\.rb$/, gemfile) + else "#{gemfile}.lock" + end + File.expand_path(lockfile) + end + + def lockfile_version + return unless File.file?(lockfile) + lockfile_contents = File.read(lockfile) + return unless lockfile_contents =~ /\n\nBUNDLED WITH\n\s{2,}(#{Gem::Version::VERSION_PATTERN})\n/ + Regexp.last_match(1) + end + + def bundler_requirement + @bundler_requirement ||= + env_var_version || cli_arg_version || + bundler_requirement_for(lockfile_version) + end + + def bundler_requirement_for(version) + return "#{Gem::Requirement.default}.a" unless version + + bundler_gem_version = Gem::Version.new(version) + + requirement = bundler_gem_version.approximate_recommendation + + return requirement unless Gem.rubygems_version < Gem::Version.new("2.7.0") + + requirement += ".a" if bundler_gem_version.prerelease? + + requirement + end + + def load_bundler! + ENV["BUNDLE_GEMFILE"] ||= gemfile + + activate_bundler + end + + def activate_bundler + gem_error = activation_error_handling do + gem "bundler", bundler_requirement + end + return if gem_error.nil? + require_error = activation_error_handling do + require "bundler/version" + end + return if require_error.nil? && Gem::Requirement.new(bundler_requirement).satisfied_by?(Gem::Version.new(Bundler::VERSION)) + warn "Activating bundler (#{bundler_requirement}) failed:\n#{gem_error.message}\n\nTo install the version of bundler this project requires, run `gem install bundler -v '#{bundler_requirement}'`" + exit 42 + end + + def activation_error_handling + yield + nil + rescue StandardError, LoadError => e + e + end +end + +m.load_bundler! + +if m.invoked_as_script? 
+ load Gem.bin_path("bundler", "bundle") +end diff --git a/test/fixtures/deploy-rails-7.0/bin/importmap b/test/fixtures/deploy-rails-7.0/bin/importmap new file mode 100755 index 0000000000..36502ab16c --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/bin/importmap @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby + +require_relative "../config/application" +require "importmap/commands" diff --git a/test/fixtures/deploy-rails-7.0/bin/rails b/test/fixtures/deploy-rails-7.0/bin/rails new file mode 100755 index 0000000000..efc0377492 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/bin/rails @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +APP_PATH = File.expand_path("../config/application", __dir__) +require_relative "../config/boot" +require "rails/commands" diff --git a/test/fixtures/deploy-rails-7.0/bin/rake b/test/fixtures/deploy-rails-7.0/bin/rake new file mode 100755 index 0000000000..4fbf10b960 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/bin/rake @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +require_relative "../config/boot" +require "rake" +Rake.application.run diff --git a/test/fixtures/deploy-rails-7.0/bin/setup b/test/fixtures/deploy-rails-7.0/bin/setup new file mode 100755 index 0000000000..ec47b79b3b --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/bin/setup @@ -0,0 +1,33 @@ +#!/usr/bin/env ruby +require "fileutils" + +# path to your application root. +APP_ROOT = File.expand_path("..", __dir__) + +def system!(*args) + system(*args) || abort("\n== Command #{args} failed ==") +end + +FileUtils.chdir APP_ROOT do + # This script is a way to set up or update your development environment automatically. + # This script is idempotent, so that you can run it at any time and get an expectable outcome. + # Add necessary setup steps to this file. + + puts "== Installing dependencies ==" + system! "gem install bundler --conservative" + system("bundle check") || system!("bundle install") + + # puts "\n== Copying sample files ==" + # unless File.exist?("config/database.yml") + # FileUtils.cp "config/database.yml.sample", "config/database.yml" + # end + + puts "\n== Preparing database ==" + system! "bin/rails db:prepare" + + puts "\n== Removing old logs and tempfiles ==" + system! "bin/rails log:clear tmp:clear" + + puts "\n== Restarting application server ==" + system! "bin/rails restart" +end diff --git a/test/fixtures/deploy-rails-7.0/config.ru b/test/fixtures/deploy-rails-7.0/config.ru new file mode 100644 index 0000000000..4a3c09a688 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config.ru @@ -0,0 +1,6 @@ +# This file is used by Rack-based servers to start the application. + +require_relative "config/environment" + +run Rails.application +Rails.application.load_server diff --git a/test/fixtures/deploy-rails-7.0/config/application.rb b/test/fixtures/deploy-rails-7.0/config/application.rb new file mode 100644 index 0000000000..3495df42c3 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/application.rb @@ -0,0 +1,22 @@ +require_relative "boot" + +require "rails/all" + +# Require the gems listed in Gemfile, including any gems +# you've limited to :test, :development, or :production. +Bundler.require(*Rails.groups) + +module DeployRails70 + class Application < Rails::Application + # Initialize configuration defaults for originally generated Rails version. + config.load_defaults 7.0 + + # Configuration for the application, engines, and railties goes here. + # + # These settings can be overridden in specific environments using the files + # in config/environments, which are processed later. 
+ # + # config.time_zone = "Central Time (US & Canada)" + # config.eager_load_paths << Rails.root.join("extras") + end +end diff --git a/test/fixtures/deploy-rails-7.0/config/boot.rb b/test/fixtures/deploy-rails-7.0/config/boot.rb new file mode 100644 index 0000000000..988a5ddc46 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/boot.rb @@ -0,0 +1,4 @@ +ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__) + +require "bundler/setup" # Set up gems listed in the Gemfile. +require "bootsnap/setup" # Speed up boot time by caching expensive operations. diff --git a/test/fixtures/deploy-rails-7.0/config/cable.yml b/test/fixtures/deploy-rails-7.0/config/cable.yml new file mode 100644 index 0000000000..27148bde25 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/cable.yml @@ -0,0 +1,10 @@ +development: + adapter: async + +test: + adapter: test + +production: + adapter: redis + url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %> + channel_prefix: deploy_rails_7_0_production diff --git a/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc b/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc new file mode 100644 index 0000000000..9b5b435f7b --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc @@ -0,0 +1 @@ +hbsGC34xuHLHN4/dtXIQ3sRB/Rrr+DTA1aHx2G6MWzLS7gvH1MHCkwsjcIdTJxRVhlGGP3zxlUxoNDTjfsfoEYQlCtN2v4N10yYUsAYA86xpEk9LkRaU9uOsBanRd6e6mAhxUoA1JwR4N7gQwovNNOScenwRpS2DVP6slF908t1tV19lcx8m3DRKUFHjYIVjrDo36+PccLcUa25CY20qZOFPBGCXtF3thabUlQsNuJrscuatPH3hfnGt6cUQiXIRjvaVJFo8oZ7EhosE9TD/XhuG0QlL4VNbqyNEOTvHhV4P5iIN55p94iluA7PfPFTuPL0Z8Pg64fpF1cuyt5BFt3mnEtja9uN/HH7fYPOS8BnPwgPYZKEtbj4jGk3iIqSanp8Yvxvx+Dme/w1bY7kqaxNBGcYx0Bx5/UTH--HzmIYSvtUBvYeVKm--/QV0gNf0jH389bNku/NBEA== \ No newline at end of file diff --git a/test/fixtures/deploy-rails-7.0/config/database.yml b/test/fixtures/deploy-rails-7.0/config/database.yml new file mode 100644 index 0000000000..fcba57f19f --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/database.yml @@ -0,0 +1,25 @@ +# SQLite. Versions 3.8.0 and up are supported. +# gem install sqlite3 +# +# Ensure the SQLite 3 gem is defined in your Gemfile +# gem "sqlite3" +# +default: &default + adapter: sqlite3 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> + timeout: 5000 + +development: + <<: *default + database: db/development.sqlite3 + +# Warning: The database defined as "test" will be erased and +# re-generated from your development database when you run "rake". +# Do not set this db to the same as development or production. +test: + <<: *default + database: db/test.sqlite3 + +production: + <<: *default + database: db/production.sqlite3 diff --git a/test/fixtures/deploy-rails-7.0/config/environment.rb b/test/fixtures/deploy-rails-7.0/config/environment.rb new file mode 100644 index 0000000000..cac5315775 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/environment.rb @@ -0,0 +1,5 @@ +# Load the Rails application. +require_relative "application" + +# Initialize the Rails application. +Rails.application.initialize! diff --git a/test/fixtures/deploy-rails-7.0/config/environments/development.rb b/test/fixtures/deploy-rails-7.0/config/environments/development.rb new file mode 100644 index 0000000000..8500f459a8 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/environments/development.rb @@ -0,0 +1,70 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. 
+ + # In the development environment your application's code is reloaded any time + # it changes. This slows down response time but is perfect for development + # since you don't have to restart the web server when you make code changes. + config.cache_classes = false + + # Do not eager load code on boot. + config.eager_load = false + + # Show full error reports. + config.consider_all_requests_local = true + + # Enable server timing + config.server_timing = true + + # Enable/disable caching. By default caching is disabled. + # Run rails dev:cache to toggle caching. + if Rails.root.join("tmp/caching-dev.txt").exist? + config.action_controller.perform_caching = true + config.action_controller.enable_fragment_cache_logging = true + + config.cache_store = :memory_store + config.public_file_server.headers = { + "Cache-Control" => "public, max-age=#{2.days.to_i}" + } + else + config.action_controller.perform_caching = false + + config.cache_store = :null_store + end + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Don't care if the mailer can't send. + config.action_mailer.raise_delivery_errors = false + + config.action_mailer.perform_caching = false + + # Print deprecation notices to the Rails logger. + config.active_support.deprecation = :log + + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + + # Raise an error on page load if there are pending migrations. + config.active_record.migration_error = :page_load + + # Highlight code that triggered database queries in logs. + config.active_record.verbose_query_logs = true + + # Suppress logger output for asset requests. + config.assets.quiet = true + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true + + # Uncomment if you wish to allow Action Cable access from any origin. + # config.action_cable.disable_request_forgery_protection = true +end diff --git a/test/fixtures/deploy-rails-7.0/config/environments/production.rb b/test/fixtures/deploy-rails-7.0/config/environments/production.rb new file mode 100644 index 0000000000..f45814d9af --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/environments/production.rb @@ -0,0 +1,93 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # Code is not reloaded between requests. + config.cache_classes = true + + # Eager load code on boot. This eager loads most of Rails and + # your application in memory, allowing both threaded web servers + # and those relying on copy on write to perform better. + # Rake tasks automatically ignore this option for performance. + config.eager_load = true + + # Full error reports are disabled and caching is turned on. + config.consider_all_requests_local = false + config.action_controller.perform_caching = true + + # Ensures that a master key has been made available in either ENV["RAILS_MASTER_KEY"] + # or in config/master.key. This key is used to decrypt credentials (and other encrypted files). 
+ # config.require_master_key = true + + # Disable serving static files from the `/public` folder by default since + # Apache or NGINX already handles this. + config.public_file_server.enabled = ENV["RAILS_SERVE_STATIC_FILES"].present? + + # Compress CSS using a preprocessor. + # config.assets.css_compressor = :sass + + # Do not fallback to assets pipeline if a precompiled asset is missed. + config.assets.compile = false + + # Enable serving of images, stylesheets, and JavaScripts from an asset server. + # config.asset_host = "http://assets.example.com" + + # Specifies the header that your server uses for sending files. + # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for Apache + # config.action_dispatch.x_sendfile_header = "X-Accel-Redirect" # for NGINX + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Mount Action Cable outside main process or domain. + # config.action_cable.mount_path = nil + # config.action_cable.url = "wss://example.com/cable" + # config.action_cable.allowed_request_origins = [ "http://example.com", /http:\/\/example.*/ ] + + # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. + # config.force_ssl = true + + # Include generic and useful information about system operation, but avoid logging too much + # information to avoid inadvertent exposure of personally identifiable information (PII). + config.log_level = :info + + # Prepend all log lines with the following tags. + config.log_tags = [ :request_id ] + + # Use a different cache store in production. + # config.cache_store = :mem_cache_store + + # Use a real queuing backend for Active Job (and separate queues per environment). + # config.active_job.queue_adapter = :resque + # config.active_job.queue_name_prefix = "deploy_rails_7_0_production" + + config.action_mailer.perform_caching = false + + # Ignore bad email addresses and do not raise email delivery errors. + # Set this to true and configure the email server for immediate delivery to raise delivery errors. + # config.action_mailer.raise_delivery_errors = false + + # Enable locale fallbacks for I18n (makes lookups for any locale fall back to + # the I18n.default_locale when a translation cannot be found). + config.i18n.fallbacks = true + + # Don't log any deprecations. + config.active_support.report_deprecations = false + + # Use default logging formatter so that PID and timestamp are not suppressed. + config.log_formatter = ::Logger::Formatter.new + + # Use a different logger for distributed setups. + # require "syslog/logger" + # config.logger = ActiveSupport::TaggedLogging.new(Syslog::Logger.new "app-name") + + if ENV["RAILS_LOG_TO_STDOUT"].present? + logger = ActiveSupport::Logger.new(STDOUT) + logger.formatter = config.log_formatter + config.logger = ActiveSupport::TaggedLogging.new(logger) + end + + # Do not dump schema after migrations. + config.active_record.dump_schema_after_migration = false +end diff --git a/test/fixtures/deploy-rails-7.0/config/environments/test.rb b/test/fixtures/deploy-rails-7.0/config/environments/test.rb new file mode 100644 index 0000000000..6ea4d1e706 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/environments/test.rb @@ -0,0 +1,60 @@ +require "active_support/core_ext/integer/time" + +# The test environment is used exclusively to run your application's +# test suite. You never need to work with it otherwise. 
Remember that +# your test database is "scratch space" for the test suite and is wiped +# and recreated between test runs. Don't rely on the data there! + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # Turn false under Spring and add config.action_view.cache_template_loading = true. + config.cache_classes = true + + # Eager loading loads your whole application. When running a single test locally, + # this probably isn't necessary. It's a good idea to do in a continuous integration + # system, or in some way before deploying your code. + config.eager_load = ENV["CI"].present? + + # Configure public file server for tests with Cache-Control for performance. + config.public_file_server.enabled = true + config.public_file_server.headers = { + "Cache-Control" => "public, max-age=#{1.hour.to_i}" + } + + # Show full error reports and disable caching. + config.consider_all_requests_local = true + config.action_controller.perform_caching = false + config.cache_store = :null_store + + # Raise exceptions instead of rendering exception templates. + config.action_dispatch.show_exceptions = false + + # Disable request forgery protection in test environment. + config.action_controller.allow_forgery_protection = false + + # Store uploaded files on the local file system in a temporary directory. + config.active_storage.service = :test + + config.action_mailer.perform_caching = false + + # Tell Action Mailer not to deliver emails to the real world. + # The :test delivery method accumulates sent emails in the + # ActionMailer::Base.deliveries array. + config.action_mailer.delivery_method = :test + + # Print deprecation notices to the stderr. + config.active_support.deprecation = :stderr + + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true +end diff --git a/test/fixtures/deploy-rails-7.0/config/importmap.rb b/test/fixtures/deploy-rails-7.0/config/importmap.rb new file mode 100644 index 0000000000..909dfc542d --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/importmap.rb @@ -0,0 +1,7 @@ +# Pin npm packages by running ./bin/importmap + +pin "application" +pin "@hotwired/turbo-rails", to: "turbo.min.js" +pin "@hotwired/stimulus", to: "stimulus.min.js" +pin "@hotwired/stimulus-loading", to: "stimulus-loading.js" +pin_all_from "app/javascript/controllers", under: "controllers" diff --git a/test/fixtures/deploy-rails-7.0/config/initializers/assets.rb b/test/fixtures/deploy-rails-7.0/config/initializers/assets.rb new file mode 100644 index 0000000000..2eeef966fe --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/initializers/assets.rb @@ -0,0 +1,12 @@ +# Be sure to restart your server when you modify this file. + +# Version of your assets, change this if you want to expire all your assets. +Rails.application.config.assets.version = "1.0" + +# Add additional assets to the asset load path. +# Rails.application.config.assets.paths << Emoji.images_path + +# Precompile additional assets. +# application.js, application.css, and all non-JS/CSS in the app/assets +# folder are already added. 
+# Rails.application.config.assets.precompile += %w( admin.js admin.css ) diff --git a/test/fixtures/deploy-rails-7.0/config/initializers/content_security_policy.rb b/test/fixtures/deploy-rails-7.0/config/initializers/content_security_policy.rb new file mode 100644 index 0000000000..54f47cf15f --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/initializers/content_security_policy.rb @@ -0,0 +1,25 @@ +# Be sure to restart your server when you modify this file. + +# Define an application-wide content security policy. +# See the Securing Rails Applications Guide for more information: +# https://guides.rubyonrails.org/security.html#content-security-policy-header + +# Rails.application.configure do +# config.content_security_policy do |policy| +# policy.default_src :self, :https +# policy.font_src :self, :https, :data +# policy.img_src :self, :https, :data +# policy.object_src :none +# policy.script_src :self, :https +# policy.style_src :self, :https +# # Specify URI for violation reports +# # policy.report_uri "/csp-violation-report-endpoint" +# end +# +# # Generate session nonces for permitted importmap and inline scripts +# config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s } +# config.content_security_policy_nonce_directives = %w(script-src) +# +# # Report violations without enforcing the policy. +# # config.content_security_policy_report_only = true +# end diff --git a/test/fixtures/deploy-rails-7.0/config/initializers/filter_parameter_logging.rb b/test/fixtures/deploy-rails-7.0/config/initializers/filter_parameter_logging.rb new file mode 100644 index 0000000000..adc6568ce8 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/initializers/filter_parameter_logging.rb @@ -0,0 +1,8 @@ +# Be sure to restart your server when you modify this file. + +# Configure parameters to be filtered from the log file. Use this to limit dissemination of +# sensitive information. See the ActiveSupport::ParameterFilter documentation for supported +# notations and behaviors. +Rails.application.config.filter_parameters += [ + :passw, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn +] diff --git a/test/fixtures/deploy-rails-7.0/config/initializers/inflections.rb b/test/fixtures/deploy-rails-7.0/config/initializers/inflections.rb new file mode 100644 index 0000000000..3860f659ea --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/initializers/inflections.rb @@ -0,0 +1,16 @@ +# Be sure to restart your server when you modify this file. + +# Add new inflection rules using the following format. Inflections +# are locale specific, and you may define rules for as many different +# locales as you wish. All of these examples are active by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.plural /^(ox)$/i, "\\1en" +# inflect.singular /^(ox)en/i, "\\1" +# inflect.irregular "person", "people" +# inflect.uncountable %w( fish sheep ) +# end + +# These inflection rules are supported but not enabled by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.acronym "RESTful" +# end diff --git a/test/fixtures/deploy-rails-7.0/config/initializers/permissions_policy.rb b/test/fixtures/deploy-rails-7.0/config/initializers/permissions_policy.rb new file mode 100644 index 0000000000..00f64d71b0 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/initializers/permissions_policy.rb @@ -0,0 +1,11 @@ +# Define an application-wide HTTP permissions policy. 
For further +# information see https://developers.google.com/web/updates/2018/06/feature-policy +# +# Rails.application.config.permissions_policy do |f| +# f.camera :none +# f.gyroscope :none +# f.microphone :none +# f.usb :none +# f.fullscreen :self +# f.payment :self, "https://secure.example.com" +# end diff --git a/test/fixtures/deploy-rails-7.0/config/locales/en.yml b/test/fixtures/deploy-rails-7.0/config/locales/en.yml new file mode 100644 index 0000000000..8ca56fc74f --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/locales/en.yml @@ -0,0 +1,33 @@ +# Files in the config/locales directory are used for internationalization +# and are automatically loaded by Rails. If you want to use locales other +# than English, add the necessary files in this directory. +# +# To use the locales, use `I18n.t`: +# +# I18n.t "hello" +# +# In views, this is aliased to just `t`: +# +# <%= t("hello") %> +# +# To use a different locale, set it with `I18n.locale`: +# +# I18n.locale = :es +# +# This would use the information in config/locales/es.yml. +# +# The following keys must be escaped otherwise they will not be retrieved by +# the default I18n backend: +# +# true, false, on, off, yes, no +# +# Instead, surround them with single quotes. +# +# en: +# "true": "foo" +# +# To learn more, please read the Rails Internationalization guide +# available at https://guides.rubyonrails.org/i18n.html. + +en: + hello: "Hello world" diff --git a/test/fixtures/deploy-rails-7.0/config/puma.rb b/test/fixtures/deploy-rails-7.0/config/puma.rb new file mode 100644 index 0000000000..daaf036999 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/puma.rb @@ -0,0 +1,43 @@ +# Puma can serve each request in a thread from an internal thread pool. +# The `threads` method setting takes two numbers: a minimum and maximum. +# Any libraries that use thread pools should be configured to match +# the maximum value specified for Puma. Default is set to 5 threads for minimum +# and maximum; this matches the default thread size of Active Record. +# +max_threads_count = ENV.fetch("RAILS_MAX_THREADS") { 5 } +min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } +threads min_threads_count, max_threads_count + +# Specifies the `worker_timeout` threshold that Puma will use to wait before +# terminating a worker in development environments. +# +worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" + +# Specifies the `port` that Puma will listen on to receive requests; default is 3000. +# +port ENV.fetch("PORT") { 3000 } + +# Specifies the `environment` that Puma will run in. +# +environment ENV.fetch("RAILS_ENV") { "development" } + +# Specifies the `pidfile` that Puma will use. +pidfile ENV.fetch("PIDFILE") { "tmp/pids/server.pid" } + +# Specifies the number of `workers` to boot in clustered mode. +# Workers are forked web server processes. If using threads and workers together +# the concurrency of the application would be max `threads` * `workers`. +# Workers do not work on JRuby or Windows (both of which do not support +# processes). +# +# workers ENV.fetch("WEB_CONCURRENCY") { 2 } + +# Use the `preload_app!` method when specifying a `workers` number. +# This directive tells Puma to first boot the application and load code +# before forking the application. This takes advantage of Copy On Write +# process behavior so workers use less memory. +# +# preload_app! + +# Allow puma to be restarted by `bin/rails restart` command. 
+plugin :tmp_restart diff --git a/test/fixtures/deploy-rails-7.0/config/routes.rb b/test/fixtures/deploy-rails-7.0/config/routes.rb new file mode 100644 index 0000000000..262ffd5472 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/routes.rb @@ -0,0 +1,6 @@ +Rails.application.routes.draw do + # Define your application routes per the DSL in https://guides.rubyonrails.org/routing.html + + # Defines the root path route ("/") + # root "articles#index" +end diff --git a/test/fixtures/deploy-rails-7.0/config/storage.yml b/test/fixtures/deploy-rails-7.0/config/storage.yml new file mode 100644 index 0000000000..4942ab6694 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/config/storage.yml @@ -0,0 +1,34 @@ +test: + service: Disk + root: <%= Rails.root.join("tmp/storage") %> + +local: + service: Disk + root: <%= Rails.root.join("storage") %> + +# Use bin/rails credentials:edit to set the AWS secrets (as aws:access_key_id|secret_access_key) +# amazon: +# service: S3 +# access_key_id: <%= Rails.application.credentials.dig(:aws, :access_key_id) %> +# secret_access_key: <%= Rails.application.credentials.dig(:aws, :secret_access_key) %> +# region: us-east-1 +# bucket: your_own_bucket-<%= Rails.env %> + +# Remember not to checkin your GCS keyfile to a repository +# google: +# service: GCS +# project: your_project +# credentials: <%= Rails.root.join("path/to/gcs.keyfile") %> +# bucket: your_own_bucket-<%= Rails.env %> + +# Use bin/rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key) +# microsoft: +# service: AzureStorage +# storage_account_name: your_account_name +# storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %> +# container: your_container_name-<%= Rails.env %> + +# mirror: +# service: Mirror +# primary: local +# mirrors: [ amazon, google, microsoft ] diff --git a/test/fixtures/deploy-rails-7.0/db/seeds.rb b/test/fixtures/deploy-rails-7.0/db/seeds.rb new file mode 100644 index 0000000000..bc25fce306 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/db/seeds.rb @@ -0,0 +1,7 @@ +# This file should contain all the record creation needed to seed the database with its default values. +# The data can then be loaded with the bin/rails db:seed command (or created alongside the database with db:setup). +# +# Examples: +# +# movies = Movie.create([{ name: "Star Wars" }, { name: "Lord of the Rings" }]) +# Character.create(name: "Luke", movie: movies.first) diff --git a/test/fixtures/deploy-rails-7.0/lib/assets/.keep b/test/fixtures/deploy-rails-7.0/lib/assets/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/lib/tasks/.keep b/test/fixtures/deploy-rails-7.0/lib/tasks/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/log/.keep b/test/fixtures/deploy-rails-7.0/log/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.0/public/404.html b/test/fixtures/deploy-rails-7.0/public/404.html new file mode 100644 index 0000000000..2be3af26fc --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/public/404.html @@ -0,0 +1,67 @@ + + + + The page you were looking for doesn't exist (404) + + + + + + +
+  <div class="dialog">
+    <div>
+      <h1>The page you were looking for doesn't exist.</h1>
+      <p>You may have mistyped the address or the page may have moved.</p>
+    </div>
+    <div>
+      <p>If you are the application owner check the logs for more information.</p>
+    </div>
+  </div>
+ + diff --git a/test/fixtures/deploy-rails-7.0/public/422.html b/test/fixtures/deploy-rails-7.0/public/422.html new file mode 100644 index 0000000000..c08eac0d1d --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/public/422.html @@ -0,0 +1,67 @@ + + + + The change you wanted was rejected (422) + + + + + + +
+  <div class="dialog">
+    <div>
+      <h1>The change you wanted was rejected.</h1>
+      <p>Maybe you tried to change something you didn't have access to.</p>
+    </div>
+    <div>
+      <p>If you are the application owner check the logs for more information.</p>
+    </div>
+  </div>
+ + diff --git a/test/fixtures/deploy-rails-7.0/public/500.html b/test/fixtures/deploy-rails-7.0/public/500.html new file mode 100644 index 0000000000..78a030af22 --- /dev/null +++ b/test/fixtures/deploy-rails-7.0/public/500.html @@ -0,0 +1,66 @@ + + + + We're sorry, but something went wrong (500) + + + + + + +
+  <div class="dialog">
+    <div>
+      <h1>We're sorry, but something went wrong.</h1>
+    </div>
+    <div>
+      <p>If you are the application owner check the logs for more information.</p>
+    </div>
+  </div>
+
+
diff --git a/test/fixtures/deploy-rails-7.0/public/apple-touch-icon-precomposed.png b/test/fixtures/deploy-rails-7.0/public/apple-touch-icon-precomposed.png
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/public/apple-touch-icon.png b/test/fixtures/deploy-rails-7.0/public/apple-touch-icon.png
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/public/favicon.ico b/test/fixtures/deploy-rails-7.0/public/favicon.ico
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/public/robots.txt b/test/fixtures/deploy-rails-7.0/public/robots.txt
new file mode 100644
index 0000000000..c19f78ab68
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.0/public/robots.txt
@@ -0,0 +1 @@
+# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file
diff --git a/test/fixtures/deploy-rails-7.0/storage/.keep b/test/fixtures/deploy-rails-7.0/storage/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/application_system_test_case.rb b/test/fixtures/deploy-rails-7.0/test/application_system_test_case.rb
new file mode 100644
index 0000000000..d19212abd5
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.0/test/application_system_test_case.rb
@@ -0,0 +1,5 @@
+require "test_helper"
+
+class ApplicationSystemTestCase < ActionDispatch::SystemTestCase
+  driven_by :selenium, using: :chrome, screen_size: [1400, 1400]
+end
diff --git a/test/fixtures/deploy-rails-7.0/test/channels/application_cable/connection_test.rb b/test/fixtures/deploy-rails-7.0/test/channels/application_cable/connection_test.rb
new file mode 100644
index 0000000000..800405f15e
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.0/test/channels/application_cable/connection_test.rb
@@ -0,0 +1,11 @@
+require "test_helper"
+
+class ApplicationCable::ConnectionTest < ActionCable::Connection::TestCase
+  # test "connects with cookies" do
+  #   cookies.signed[:user_id] = 42
+  #
+  #   connect
+  #
+  #   assert_equal connection.user_id, "42"
+  # end
+end
diff --git a/test/fixtures/deploy-rails-7.0/test/controllers/.keep b/test/fixtures/deploy-rails-7.0/test/controllers/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/fixtures/files/.keep b/test/fixtures/deploy-rails-7.0/test/fixtures/files/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/helpers/.keep b/test/fixtures/deploy-rails-7.0/test/helpers/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/integration/.keep b/test/fixtures/deploy-rails-7.0/test/integration/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/mailers/.keep b/test/fixtures/deploy-rails-7.0/test/mailers/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/models/.keep b/test/fixtures/deploy-rails-7.0/test/models/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/system/.keep b/test/fixtures/deploy-rails-7.0/test/system/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/test/test_helper.rb b/test/fixtures/deploy-rails-7.0/test/test_helper.rb
new file mode 100644
index 0000000000..d713e377c9
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.0/test/test_helper.rb
@@ -0,0 +1,13 @@
+ENV["RAILS_ENV"] ||= "test"
+require_relative "../config/environment"
+require "rails/test_help"
+
+class ActiveSupport::TestCase
+  # Run tests in parallel with specified workers
+  parallelize(workers: :number_of_processors)
+
+  # Setup all fixtures in test/fixtures/*.yml for all tests in alphabetical order.
+  fixtures :all
+
+  # Add more helper methods to be used by all tests here...
+end
diff --git a/test/fixtures/deploy-rails-7.0/vendor/.keep b/test/fixtures/deploy-rails-7.0/vendor/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.0/vendor/javascript/.keep b/test/fixtures/deploy-rails-7.0/vendor/javascript/.keep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test/fixtures/deploy-rails-7.2/.dockerignore b/test/fixtures/deploy-rails-7.2/.dockerignore
new file mode 100644
index 0000000000..cd7190b40e
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.2/.dockerignore
@@ -0,0 +1,48 @@
+# See https://docs.docker.com/engine/reference/builder/#dockerignore-file for more about ignoring files.
+
+# Ignore git directory.
+/.git/
+/.gitignore
+
+# Ignore bundler config.
+/.bundle
+
+# Ignore all environment files (except templates).
+/.env*
+!/.env*.erb
+
+# Ignore all default key files.
+/config/master.key
+/config/credentials/*.key
+
+# Ignore all logfiles and tempfiles.
+/log/*
+/tmp/*
+!/log/.keep
+!/tmp/.keep
+
+# Ignore pidfiles, but keep the directory.
+/tmp/pids/*
+!/tmp/pids/.keep
+
+# Ignore storage (uploaded files in development and any SQLite databases).
+/storage/*
+!/storage/.keep
+/tmp/storage/*
+!/tmp/storage/.keep
+
+# Ignore assets.
+/node_modules/
+/app/assets/builds/*
+!/app/assets/builds/.keep
+/public/assets
+
+# Ignore CI service files.
+/.github
+
+# Ignore development files
+/.devcontainer
+
+# Ignore Docker-related files
+/.dockerignore
+/Dockerfile*
diff --git a/test/fixtures/deploy-rails-7.2/.gitattributes b/test/fixtures/deploy-rails-7.2/.gitattributes
new file mode 100644
index 0000000000..8dc4323435
--- /dev/null
+++ b/test/fixtures/deploy-rails-7.2/.gitattributes
@@ -0,0 +1,9 @@
+# See https://git-scm.com/docs/gitattributes for more about git attribute files.
+
+# Mark the database schema as having been generated.
+db/schema.rb linguist-generated
+
+# Mark any vendored files as having been vendored.
+vendor/* linguist-vendored +config/credentials/*.yml.enc diff=rails_credentials +config/credentials.yml.enc diff=rails_credentials diff --git a/test/fixtures/deploy-rails-7.2/.github/dependabot.yml b/test/fixtures/deploy-rails-7.2/.github/dependabot.yml new file mode 100644 index 0000000000..f0527e6be1 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/.github/dependabot.yml @@ -0,0 +1,12 @@ +version: 2 +updates: +- package-ecosystem: bundler + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 +- package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily + open-pull-requests-limit: 10 diff --git a/test/fixtures/deploy-rails-7.2/.github/workflows/ci.yml b/test/fixtures/deploy-rails-7.2/.github/workflows/ci.yml new file mode 100644 index 0000000000..00af91f692 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/.github/workflows/ci.yml @@ -0,0 +1,90 @@ +name: CI + +on: + pull_request: + push: + branches: [ main ] + +jobs: + scan_ruby: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Scan for common Rails security vulnerabilities using static analysis + run: bin/brakeman --no-pager + + scan_js: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Scan for security vulnerabilities in JavaScript dependencies + run: bin/importmap audit + + lint: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Lint code for consistent style + run: bin/rubocop -f github + + test: + runs-on: ubuntu-latest + + # services: + # redis: + # image: redis + # ports: + # - 6379:6379 + # options: --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 + steps: + - name: Install packages + run: sudo apt-get update && sudo apt-get install --no-install-recommends -y google-chrome-stable curl libjemalloc2 libvips sqlite3 + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: .ruby-version + bundler-cache: true + + - name: Run tests + env: + RAILS_ENV: test + # REDIS_URL: redis://localhost:6379/0 + run: bin/rails db:test:prepare test test:system + + - name: Keep screenshots from failed system tests + uses: actions/upload-artifact@v4 + if: failure() + with: + name: screenshots + path: ${{ github.workspace }}/tmp/screenshots + if-no-files-found: ignore diff --git a/test/fixtures/deploy-rails-7.2/.gitignore b/test/fixtures/deploy-rails-7.2/.gitignore new file mode 100644 index 0000000000..4aaf1022e5 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/.gitignore @@ -0,0 +1,35 @@ +# See https://help.github.com/articles/ignoring-files for more about ignoring files. +# +# Temporary files generated by your text editor or operating system +# belong in git's global ignore instead: +# `$XDG_CONFIG_HOME/git/ignore` or `~/.config/git/ignore` + +# Ignore bundler config. +/.bundle + +# Ignore all environment files (except templates). +/.env* +!/.env*.erb + +# Ignore all logfiles and tempfiles. +/log/* +/tmp/* +!/log/.keep +!/tmp/.keep + +# Ignore pidfiles, but keep the directory. 
+/tmp/pids/* +!/tmp/pids/ +!/tmp/pids/.keep + +# Ignore storage (uploaded files in development and any SQLite databases). +/storage/* +!/storage/.keep +/tmp/storage/* +!/tmp/storage/ +!/tmp/storage/.keep + +/public/assets + +# Ignore master key for decrypting credentials and more. +/config/master.key diff --git a/test/fixtures/deploy-rails-7.2/.rubocop.yml b/test/fixtures/deploy-rails-7.2/.rubocop.yml new file mode 100644 index 0000000000..f9d86d4a54 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/.rubocop.yml @@ -0,0 +1,8 @@ +# Omakase Ruby styling for Rails +inherit_gem: { rubocop-rails-omakase: rubocop.yml } + +# Overwrite or add rules to create your own house style +# +# # Use `[a, [b, c]]` not `[ a, [ b, c ] ]` +# Layout/SpaceInsideArrayLiteralBrackets: +# Enabled: false diff --git a/test/fixtures/deploy-rails-7.2/.ruby-version b/test/fixtures/deploy-rails-7.2/.ruby-version new file mode 100644 index 0000000000..71e447d5b6 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/.ruby-version @@ -0,0 +1 @@ +ruby-3.1.4 diff --git a/test/fixtures/deploy-rails-7.2/Dockerfile b/test/fixtures/deploy-rails-7.2/Dockerfile new file mode 100644 index 0000000000..7686971bc4 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/Dockerfile @@ -0,0 +1,69 @@ +# syntax = docker/dockerfile:1 + +# This Dockerfile is designed for production, not development. Use with Kamal or build'n'run by hand: +# docker build -t my-app . +# docker run -d -p 80:80 -p 443:443 --name my-app -e RAILS_MASTER_KEY= my-app + +# Make sure RUBY_VERSION matches the Ruby version in .ruby-version +ARG RUBY_VERSION=3.1.4 +FROM docker.io/library/ruby:$RUBY_VERSION-slim AS base + +# Rails app lives here +WORKDIR /rails + +# Install base packages +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y curl libjemalloc2 libvips sqlite3 && \ + rm -rf /var/lib/apt/lists /var/cache/apt/archives + +# Set production environment +ENV RAILS_ENV="production" \ + BUNDLE_DEPLOYMENT="1" \ + BUNDLE_PATH="/usr/local/bundle" \ + BUNDLE_WITHOUT="development" + +# Throw-away build stage to reduce size of final image +FROM base AS build + +# Install packages needed to build gems +RUN apt-get update -qq && \ + apt-get install --no-install-recommends -y build-essential git pkg-config && \ + rm -rf /var/lib/apt/lists /var/cache/apt/archives + +# Install application gems +COPY Gemfile Gemfile.lock ./ +RUN bundle install && \ + rm -rf ~/.bundle/ "${BUNDLE_PATH}"/ruby/*/cache "${BUNDLE_PATH}"/ruby/*/bundler/gems/*/.git && \ + bundle exec bootsnap precompile --gemfile + +# Copy application code +COPY . . + +# Precompile bootsnap code for faster boot times +RUN bundle exec bootsnap precompile app/ lib/ + +# Precompiling assets for production without requiring secret RAILS_MASTER_KEY +RUN SECRET_KEY_BASE_DUMMY=1 ./bin/rails assets:precompile + + + + +# Final stage for app image +FROM base + +# Copy built artifacts: gems, application +COPY --from=build "${BUNDLE_PATH}" "${BUNDLE_PATH}" +COPY --from=build /rails /rails + +# Run and own only the runtime files as a non-root user for security +RUN groupadd --system --gid 1000 rails && \ + useradd rails --uid 1000 --gid 1000 --create-home --shell /bin/bash && \ + chown -R rails:rails db log storage tmp +USER 1000:1000 + +# Entrypoint prepares the database. 
+ENTRYPOINT ["/rails/bin/docker-entrypoint"] + +# Start the server by default, this can be overwritten at runtime +EXPOSE 3000 +CMD ["./bin/rails", "server"] diff --git a/test/fixtures/deploy-rails-7.2/Gemfile b/test/fixtures/deploy-rails-7.2/Gemfile new file mode 100644 index 0000000000..6150aba327 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/Gemfile @@ -0,0 +1,60 @@ +source "https://rubygems.org" + +# Bundle edge Rails instead: gem "rails", github: "rails/rails", branch: "main" +gem "rails", "~> 7.2.1", ">= 7.2.1.1" +# The original asset pipeline for Rails [https://github.com/rails/sprockets-rails] +gem "sprockets-rails" +# Use sqlite3 as the database for Active Record +gem "sqlite3", ">= 1.4" +# Use the Puma web server [https://github.com/puma/puma] +gem "puma", ">= 5.0" +# Use JavaScript with ESM import maps [https://github.com/rails/importmap-rails] +gem "importmap-rails" +# Hotwire's SPA-like page accelerator [https://turbo.hotwired.dev] +gem "turbo-rails" +# Hotwire's modest JavaScript framework [https://stimulus.hotwired.dev] +gem "stimulus-rails" +# Build JSON APIs with ease [https://github.com/rails/jbuilder] +gem "jbuilder" +# Use Redis adapter to run Action Cable in production +# gem "redis", ">= 4.0.1" + +# Use Kredis to get higher-level data types in Redis [https://github.com/rails/kredis] +# gem "kredis" + +# Use Active Model has_secure_password [https://guides.rubyonrails.org/active_model_basics.html#securepassword] +# gem "bcrypt", "~> 3.1.7" + +# Windows does not include zoneinfo files, so bundle the tzinfo-data gem +gem "tzinfo-data", platforms: %i[ windows jruby ] + +# Reduces boot times through caching; required in config/boot.rb +gem "bootsnap", require: false + +# Use Active Storage variants [https://guides.rubyonrails.org/active_storage_overview.html#transforming-images] +# gem "image_processing", "~> 1.2" + +group :development, :test do + # See https://guides.rubyonrails.org/debugging_rails_applications.html#debugging-with-the-debug-gem + gem "debug", platforms: %i[ mri windows ], require: "debug/prelude" + + # Static analysis for security vulnerabilities [https://brakemanscanner.org/] + gem "brakeman", require: false + + # Omakase Ruby styling [https://github.com/rails/rubocop-rails-omakase/] + gem "rubocop-rails-omakase", require: false +end + +group :development do + # Use console on exceptions pages [https://github.com/rails/web-console] + gem "web-console" + + # Highlight the fine-grained location where an error occurred [https://github.com/ruby/error_highlight] + gem "error_highlight", ">= 0.4.0", platforms: [:ruby] +end + +group :test do + # Use system testing [https://guides.rubyonrails.org/testing.html#system-testing] + gem "capybara" + gem "selenium-webdriver" +end diff --git a/test/fixtures/deploy-rails-7.2/Gemfile.lock b/test/fixtures/deploy-rails-7.2/Gemfile.lock new file mode 100644 index 0000000000..97f8dcf52a --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/Gemfile.lock @@ -0,0 +1,296 @@ +GEM + remote: https://rubygems.org/ + specs: + actioncable (7.2.1.1) + actionpack (= 7.2.1.1) + activesupport (= 7.2.1.1) + nio4r (~> 2.0) + websocket-driver (>= 0.6.1) + zeitwerk (~> 2.6) + actionmailbox (7.2.1.1) + actionpack (= 7.2.1.1) + activejob (= 7.2.1.1) + activerecord (= 7.2.1.1) + activestorage (= 7.2.1.1) + activesupport (= 7.2.1.1) + mail (>= 2.8.0) + actionmailer (7.2.1.1) + actionpack (= 7.2.1.1) + actionview (= 7.2.1.1) + activejob (= 7.2.1.1) + activesupport (= 7.2.1.1) + mail (>= 2.8.0) + rails-dom-testing (~> 2.2) + actionpack (7.2.1.1) + 
actionview (= 7.2.1.1) + activesupport (= 7.2.1.1) + nokogiri (>= 1.8.5) + racc + rack (>= 2.2.4, < 3.2) + rack-session (>= 1.0.1) + rack-test (>= 0.6.3) + rails-dom-testing (~> 2.2) + rails-html-sanitizer (~> 1.6) + useragent (~> 0.16) + actiontext (7.2.1.1) + actionpack (= 7.2.1.1) + activerecord (= 7.2.1.1) + activestorage (= 7.2.1.1) + activesupport (= 7.2.1.1) + globalid (>= 0.6.0) + nokogiri (>= 1.8.5) + actionview (7.2.1.1) + activesupport (= 7.2.1.1) + builder (~> 3.1) + erubi (~> 1.11) + rails-dom-testing (~> 2.2) + rails-html-sanitizer (~> 1.6) + activejob (7.2.1.1) + activesupport (= 7.2.1.1) + globalid (>= 0.3.6) + activemodel (7.2.1.1) + activesupport (= 7.2.1.1) + activerecord (7.2.1.1) + activemodel (= 7.2.1.1) + activesupport (= 7.2.1.1) + timeout (>= 0.4.0) + activestorage (7.2.1.1) + actionpack (= 7.2.1.1) + activejob (= 7.2.1.1) + activerecord (= 7.2.1.1) + activesupport (= 7.2.1.1) + marcel (~> 1.0) + activesupport (7.2.1.1) + base64 + bigdecimal + concurrent-ruby (~> 1.0, >= 1.3.1) + connection_pool (>= 2.2.5) + drb + i18n (>= 1.6, < 2) + logger (>= 1.4.2) + minitest (>= 5.1) + securerandom (>= 0.3) + tzinfo (~> 2.0, >= 2.0.5) + addressable (2.8.7) + public_suffix (>= 2.0.2, < 7.0) + ast (2.4.2) + base64 (0.2.0) + bigdecimal (3.1.8) + bindex (0.8.1) + bootsnap (1.18.4) + msgpack (~> 1.2) + brakeman (6.2.1) + racc + builder (3.3.0) + capybara (3.40.0) + addressable + matrix + mini_mime (>= 0.1.3) + nokogiri (~> 1.11) + rack (>= 1.6.0) + rack-test (>= 0.6.3) + regexp_parser (>= 1.5, < 3.0) + xpath (~> 3.2) + concurrent-ruby (1.3.4) + connection_pool (2.4.1) + crass (1.0.6) + date (3.3.4) + debug (1.9.2) + irb (~> 1.10) + reline (>= 0.3.8) + drb (2.2.1) + error_highlight (0.6.0) + erubi (1.13.0) + globalid (1.2.1) + activesupport (>= 6.1) + i18n (1.14.6) + concurrent-ruby (~> 1.0) + importmap-rails (2.0.3) + actionpack (>= 6.0.0) + activesupport (>= 6.0.0) + railties (>= 6.0.0) + io-console (0.7.2) + irb (1.14.1) + rdoc (>= 4.0.0) + reline (>= 0.4.2) + jbuilder (2.13.0) + actionview (>= 5.0.0) + activesupport (>= 5.0.0) + json (2.7.2) + language_server-protocol (3.17.0.3) + logger (1.6.1) + loofah (2.22.0) + crass (~> 1.0.2) + nokogiri (>= 1.12.0) + mail (2.8.1) + mini_mime (>= 0.1.1) + net-imap + net-pop + net-smtp + marcel (1.0.4) + matrix (0.4.2) + mini_mime (1.1.5) + minitest (5.25.1) + msgpack (1.7.3) + net-imap (0.4.17) + date + net-protocol + net-pop (0.1.2) + net-protocol + net-protocol (0.2.2) + timeout + net-smtp (0.5.0) + net-protocol + nio4r (2.7.3) + nokogiri (1.16.7-x86_64-linux) + racc (~> 1.4) + parallel (1.26.3) + parser (3.3.5.0) + ast (~> 2.4.1) + racc + psych (5.1.2) + stringio + public_suffix (6.0.1) + puma (6.4.3) + nio4r (~> 2.0) + racc (1.8.1) + rack (3.1.8) + rack-session (2.0.0) + rack (>= 3.0.0) + rack-test (2.1.0) + rack (>= 1.3) + rackup (2.1.0) + rack (>= 3) + webrick (~> 1.8) + rails (7.2.1.1) + actioncable (= 7.2.1.1) + actionmailbox (= 7.2.1.1) + actionmailer (= 7.2.1.1) + actionpack (= 7.2.1.1) + actiontext (= 7.2.1.1) + actionview (= 7.2.1.1) + activejob (= 7.2.1.1) + activemodel (= 7.2.1.1) + activerecord (= 7.2.1.1) + activestorage (= 7.2.1.1) + activesupport (= 7.2.1.1) + bundler (>= 1.15.0) + railties (= 7.2.1.1) + rails-dom-testing (2.2.0) + activesupport (>= 5.0.0) + minitest + nokogiri (>= 1.6) + rails-html-sanitizer (1.6.0) + loofah (~> 2.21) + nokogiri (~> 1.14) + railties (7.2.1.1) + actionpack (= 7.2.1.1) + activesupport (= 7.2.1.1) + irb (~> 1.13) + rackup (>= 1.0.0) + rake (>= 12.2) + thor (~> 1.0, >= 1.2.2) + zeitwerk (~> 
2.6) + rainbow (3.1.1) + rake (13.2.1) + rdoc (6.7.0) + psych (>= 4.0.0) + regexp_parser (2.9.2) + reline (0.5.10) + io-console (~> 0.5) + rexml (3.3.8) + rubocop (1.67.0) + json (~> 2.3) + language_server-protocol (>= 3.17.0) + parallel (~> 1.10) + parser (>= 3.3.0.2) + rainbow (>= 2.2.2, < 4.0) + regexp_parser (>= 2.4, < 3.0) + rubocop-ast (>= 1.32.2, < 2.0) + ruby-progressbar (~> 1.7) + unicode-display_width (>= 2.4.0, < 3.0) + rubocop-ast (1.32.3) + parser (>= 3.3.1.0) + rubocop-minitest (0.36.0) + rubocop (>= 1.61, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-performance (1.22.1) + rubocop (>= 1.48.1, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-rails (2.26.2) + activesupport (>= 4.2.0) + rack (>= 1.1) + rubocop (>= 1.52.0, < 2.0) + rubocop-ast (>= 1.31.1, < 2.0) + rubocop-rails-omakase (1.0.0) + rubocop + rubocop-minitest + rubocop-performance + rubocop-rails + ruby-progressbar (1.13.0) + rubyzip (2.3.2) + securerandom (0.3.1) + selenium-webdriver (4.25.0) + base64 (~> 0.2) + logger (~> 1.4) + rexml (~> 3.2, >= 3.2.5) + rubyzip (>= 1.2.2, < 3.0) + websocket (~> 1.0) + sprockets (4.2.1) + concurrent-ruby (~> 1.0) + rack (>= 2.2.4, < 4) + sprockets-rails (3.5.2) + actionpack (>= 6.1) + activesupport (>= 6.1) + sprockets (>= 3.0.0) + sqlite3 (2.1.0-x86_64-linux-gnu) + stimulus-rails (1.3.4) + railties (>= 6.0.0) + stringio (3.1.1) + thor (1.3.2) + timeout (0.4.1) + turbo-rails (2.0.11) + actionpack (>= 6.0.0) + railties (>= 6.0.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + unicode-display_width (2.6.0) + useragent (0.16.10) + web-console (4.2.1) + actionview (>= 6.0.0) + activemodel (>= 6.0.0) + bindex (>= 0.4.0) + railties (>= 6.0.0) + webrick (1.8.2) + websocket (1.2.11) + websocket-driver (0.7.6) + websocket-extensions (>= 0.1.0) + websocket-extensions (0.1.5) + xpath (3.2.0) + nokogiri (~> 1.8) + zeitwerk (2.6.18) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + bootsnap + brakeman + capybara + debug + error_highlight (>= 0.4.0) + importmap-rails + jbuilder + puma (>= 5.0) + rails (~> 7.2.1, >= 7.2.1.1) + rubocop-rails-omakase + selenium-webdriver + sprockets-rails + sqlite3 (>= 1.4) + stimulus-rails + turbo-rails + tzinfo-data + web-console + +BUNDLED WITH + 2.3.26 diff --git a/test/fixtures/deploy-rails-7.2/README.md b/test/fixtures/deploy-rails-7.2/README.md new file mode 100644 index 0000000000..7db80e4ca1 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/README.md @@ -0,0 +1,24 @@ +# README + +This README would normally document whatever steps are necessary to get the +application up and running. + +Things you may want to cover: + +* Ruby version + +* System dependencies + +* Configuration + +* Database creation + +* Database initialization + +* How to run the test suite + +* Services (job queues, cache servers, search engines, etc.) + +* Deployment instructions + +* ... diff --git a/test/fixtures/deploy-rails-7.2/Rakefile b/test/fixtures/deploy-rails-7.2/Rakefile new file mode 100644 index 0000000000..9a5ea7383a --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/Rakefile @@ -0,0 +1,6 @@ +# Add your own tasks in files placed in lib/tasks ending in .rake, +# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake. 
+ +require_relative "config/application" + +Rails.application.load_tasks diff --git a/test/fixtures/deploy-rails-7.2/app/assets/config/manifest.js b/test/fixtures/deploy-rails-7.2/app/assets/config/manifest.js new file mode 100644 index 0000000000..ddd546a0be --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/assets/config/manifest.js @@ -0,0 +1,4 @@ +//= link_tree ../images +//= link_directory ../stylesheets .css +//= link_tree ../../javascript .js +//= link_tree ../../../vendor/javascript .js diff --git a/test/fixtures/deploy-rails-7.2/app/assets/images/.keep b/test/fixtures/deploy-rails-7.2/app/assets/images/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/app/assets/stylesheets/application.css b/test/fixtures/deploy-rails-7.2/app/assets/stylesheets/application.css new file mode 100644 index 0000000000..288b9ab718 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/assets/stylesheets/application.css @@ -0,0 +1,15 @@ +/* + * This is a manifest file that'll be compiled into application.css, which will include all the files + * listed below. + * + * Any CSS (and SCSS, if configured) file within this directory, lib/assets/stylesheets, or any plugin's + * vendor/assets/stylesheets directory can be referenced here using a relative path. + * + * You're free to add application-wide styles to this file and they'll appear at the bottom of the + * compiled file so the styles you add here take precedence over styles defined in any other CSS + * files in this directory. Styles in this file should be added after the last require_* statement. + * It is generally better to create a new file per style scope. + * + *= require_tree . + *= require_self + */ diff --git a/test/fixtures/deploy-rails-7.2/app/channels/application_cable/channel.rb b/test/fixtures/deploy-rails-7.2/app/channels/application_cable/channel.rb new file mode 100644 index 0000000000..d672697283 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/channels/application_cable/channel.rb @@ -0,0 +1,4 @@ +module ApplicationCable + class Channel < ActionCable::Channel::Base + end +end diff --git a/test/fixtures/deploy-rails-7.2/app/channels/application_cable/connection.rb b/test/fixtures/deploy-rails-7.2/app/channels/application_cable/connection.rb new file mode 100644 index 0000000000..0ff5442f47 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/channels/application_cable/connection.rb @@ -0,0 +1,4 @@ +module ApplicationCable + class Connection < ActionCable::Connection::Base + end +end diff --git a/test/fixtures/deploy-rails-7.2/app/controllers/application_controller.rb b/test/fixtures/deploy-rails-7.2/app/controllers/application_controller.rb new file mode 100644 index 0000000000..0d95db22b4 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/controllers/application_controller.rb @@ -0,0 +1,4 @@ +class ApplicationController < ActionController::Base + # Only allow modern browsers supporting webp images, web push, badges, import maps, CSS nesting, and CSS :has. 
+ allow_browser versions: :modern +end diff --git a/test/fixtures/deploy-rails-7.2/app/controllers/concerns/.keep b/test/fixtures/deploy-rails-7.2/app/controllers/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/app/helpers/application_helper.rb b/test/fixtures/deploy-rails-7.2/app/helpers/application_helper.rb new file mode 100644 index 0000000000..de6be7945c --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/helpers/application_helper.rb @@ -0,0 +1,2 @@ +module ApplicationHelper +end diff --git a/test/fixtures/deploy-rails-7.2/app/javascript/application.js b/test/fixtures/deploy-rails-7.2/app/javascript/application.js new file mode 100644 index 0000000000..0d7b49404c --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/javascript/application.js @@ -0,0 +1,3 @@ +// Configure your import map in config/importmap.rb. Read more: https://github.com/rails/importmap-rails +import "@hotwired/turbo-rails" +import "controllers" diff --git a/test/fixtures/deploy-rails-7.2/app/javascript/controllers/application.js b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/application.js new file mode 100644 index 0000000000..1213e85c7a --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/application.js @@ -0,0 +1,9 @@ +import { Application } from "@hotwired/stimulus" + +const application = Application.start() + +// Configure Stimulus development experience +application.debug = false +window.Stimulus = application + +export { application } diff --git a/test/fixtures/deploy-rails-7.2/app/javascript/controllers/hello_controller.js b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/hello_controller.js new file mode 100644 index 0000000000..5975c0789d --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/hello_controller.js @@ -0,0 +1,7 @@ +import { Controller } from "@hotwired/stimulus" + +export default class extends Controller { + connect() { + this.element.textContent = "Hello World!" 
+ } +} diff --git a/test/fixtures/deploy-rails-7.2/app/javascript/controllers/index.js b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/index.js new file mode 100644 index 0000000000..1156bf8362 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/javascript/controllers/index.js @@ -0,0 +1,4 @@ +// Import and register all your controllers from the importmap via controllers/**/*_controller +import { application } from "controllers/application" +import { eagerLoadControllersFrom } from "@hotwired/stimulus-loading" +eagerLoadControllersFrom("controllers", application) diff --git a/test/fixtures/deploy-rails-7.2/app/jobs/application_job.rb b/test/fixtures/deploy-rails-7.2/app/jobs/application_job.rb new file mode 100644 index 0000000000..d394c3d106 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/jobs/application_job.rb @@ -0,0 +1,7 @@ +class ApplicationJob < ActiveJob::Base + # Automatically retry jobs that encountered a deadlock + # retry_on ActiveRecord::Deadlocked + + # Most jobs are safe to ignore if the underlying records are no longer available + # discard_on ActiveJob::DeserializationError +end diff --git a/test/fixtures/deploy-rails-7.2/app/mailers/application_mailer.rb b/test/fixtures/deploy-rails-7.2/app/mailers/application_mailer.rb new file mode 100644 index 0000000000..3c34c8148f --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/mailers/application_mailer.rb @@ -0,0 +1,4 @@ +class ApplicationMailer < ActionMailer::Base + default from: "from@example.com" + layout "mailer" +end diff --git a/test/fixtures/deploy-rails-7.2/app/models/application_record.rb b/test/fixtures/deploy-rails-7.2/app/models/application_record.rb new file mode 100644 index 0000000000..b63caeb8a5 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/models/application_record.rb @@ -0,0 +1,3 @@ +class ApplicationRecord < ActiveRecord::Base + primary_abstract_class +end diff --git a/test/fixtures/deploy-rails-7.2/app/models/concerns/.keep b/test/fixtures/deploy-rails-7.2/app/models/concerns/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/app/views/layouts/application.html.erb b/test/fixtures/deploy-rails-7.2/app/views/layouts/application.html.erb new file mode 100644 index 0000000000..daba77d6e8 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/views/layouts/application.html.erb @@ -0,0 +1,23 @@ + + + + <%= content_for(:title) || "Deploy Rails 7" %> + + + <%= csrf_meta_tags %> + <%= csp_meta_tag %> + + <%= yield :head %> + + + + + + <%= stylesheet_link_tag "application", "data-turbo-track": "reload" %> + <%= javascript_importmap_tags %> + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.html.erb b/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.html.erb new file mode 100644 index 0000000000..3aac9002ed --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.html.erb @@ -0,0 +1,13 @@ + + + + + + + + + <%= yield %> + + diff --git a/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.text.erb b/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.text.erb new file mode 100644 index 0000000000..37f0bddbd7 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/views/layouts/mailer.text.erb @@ -0,0 +1 @@ +<%= yield %> diff --git a/test/fixtures/deploy-rails-7.2/app/views/pwa/manifest.json.erb b/test/fixtures/deploy-rails-7.2/app/views/pwa/manifest.json.erb new file mode 100644 index 0000000000..4485858055 --- /dev/null +++ 
b/test/fixtures/deploy-rails-7.2/app/views/pwa/manifest.json.erb @@ -0,0 +1,22 @@ +{ + "name": "DeployRails7", + "icons": [ + { + "src": "/icon.png", + "type": "image/png", + "sizes": "512x512" + }, + { + "src": "/icon.png", + "type": "image/png", + "sizes": "512x512", + "purpose": "maskable" + } + ], + "start_url": "/", + "display": "standalone", + "scope": "/", + "description": "DeployRails7.", + "theme_color": "red", + "background_color": "red" +} diff --git a/test/fixtures/deploy-rails-7.2/app/views/pwa/service-worker.js b/test/fixtures/deploy-rails-7.2/app/views/pwa/service-worker.js new file mode 100644 index 0000000000..b3a13fb7bb --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/app/views/pwa/service-worker.js @@ -0,0 +1,26 @@ +// Add a service worker for processing Web Push notifications: +// +// self.addEventListener("push", async (event) => { +// const { title, options } = await event.data.json() +// event.waitUntil(self.registration.showNotification(title, options)) +// }) +// +// self.addEventListener("notificationclick", function(event) { +// event.notification.close() +// event.waitUntil( +// clients.matchAll({ type: "window" }).then((clientList) => { +// for (let i = 0; i < clientList.length; i++) { +// let client = clientList[i] +// let clientPath = (new URL(client.url)).pathname +// +// if (clientPath == event.notification.data.path && "focus" in client) { +// return client.focus() +// } +// } +// +// if (clients.openWindow) { +// return clients.openWindow(event.notification.data.path) +// } +// }) +// ) +// }) diff --git a/test/fixtures/deploy-rails-7.2/bin/brakeman b/test/fixtures/deploy-rails-7.2/bin/brakeman new file mode 100755 index 0000000000..ace1c9ba08 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/brakeman @@ -0,0 +1,7 @@ +#!/usr/bin/env ruby +require "rubygems" +require "bundler/setup" + +ARGV.unshift("--ensure-latest") + +load Gem.bin_path("brakeman", "brakeman") diff --git a/test/fixtures/deploy-rails-7.2/bin/bundle b/test/fixtures/deploy-rails-7.2/bin/bundle new file mode 100755 index 0000000000..981e650b68 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/bundle @@ -0,0 +1,114 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +# +# This file was generated by Bundler. +# +# The application 'bundle' is installed as part of a gem, and +# this file is here to facilitate running it. +# + +require "rubygems" + +m = Module.new do + module_function + + def invoked_as_script? + File.expand_path($0) == File.expand_path(__FILE__) + end + + def env_var_version + ENV["BUNDLER_VERSION"] + end + + def cli_arg_version + return unless invoked_as_script? # don't want to hijack other binstubs + return unless "update".start_with?(ARGV.first || " ") # must be running `bundle update` + bundler_version = nil + update_index = nil + ARGV.each_with_index do |a, i| + if update_index && update_index.succ == i && a =~ Gem::Version::ANCHORED_VERSION_PATTERN + bundler_version = a + end + next unless a =~ /\A--bundler(?:[= ](#{Gem::Version::VERSION_PATTERN}))?\z/ + bundler_version = $1 + update_index = i + end + bundler_version + end + + def gemfile + gemfile = ENV["BUNDLE_GEMFILE"] + return gemfile if gemfile && !gemfile.empty? 
+ + File.expand_path("../Gemfile", __dir__) + end + + def lockfile + lockfile = + case File.basename(gemfile) + when "gems.rb" then gemfile.sub(/\.rb$/, gemfile) + else "#{gemfile}.lock" + end + File.expand_path(lockfile) + end + + def lockfile_version + return unless File.file?(lockfile) + lockfile_contents = File.read(lockfile) + return unless lockfile_contents =~ /\n\nBUNDLED WITH\n\s{2,}(#{Gem::Version::VERSION_PATTERN})\n/ + Regexp.last_match(1) + end + + def bundler_requirement + @bundler_requirement ||= + env_var_version || cli_arg_version || + bundler_requirement_for(lockfile_version) + end + + def bundler_requirement_for(version) + return "#{Gem::Requirement.default}.a" unless version + + bundler_gem_version = Gem::Version.new(version) + + requirement = bundler_gem_version.approximate_recommendation + + return requirement unless Gem.rubygems_version < Gem::Version.new("2.7.0") + + requirement += ".a" if bundler_gem_version.prerelease? + + requirement + end + + def load_bundler! + ENV["BUNDLE_GEMFILE"] ||= gemfile + + activate_bundler + end + + def activate_bundler + gem_error = activation_error_handling do + gem "bundler", bundler_requirement + end + return if gem_error.nil? + require_error = activation_error_handling do + require "bundler/version" + end + return if require_error.nil? && Gem::Requirement.new(bundler_requirement).satisfied_by?(Gem::Version.new(Bundler::VERSION)) + warn "Activating bundler (#{bundler_requirement}) failed:\n#{gem_error.message}\n\nTo install the version of bundler this project requires, run `gem install bundler -v '#{bundler_requirement}'`" + exit 42 + end + + def activation_error_handling + yield + nil + rescue StandardError, LoadError => e + e + end +end + +m.load_bundler! + +if m.invoked_as_script? + load Gem.bin_path("bundler", "bundle") +end diff --git a/test/fixtures/deploy-rails-7.2/bin/docker-entrypoint b/test/fixtures/deploy-rails-7.2/bin/docker-entrypoint new file mode 100755 index 0000000000..840d093a9a --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/docker-entrypoint @@ -0,0 +1,13 @@ +#!/bin/bash -e + +# Enable jemalloc for reduced memory usage and latency. 
+if [ -z "${LD_PRELOAD+x}" ] && [ -f /usr/lib/*/libjemalloc.so.2 ]; then + export LD_PRELOAD="$(echo /usr/lib/*/libjemalloc.so.2)" +fi + +# If running the rails server then create or migrate existing database +if [ "${1}" == "./bin/rails" ] && [ "${2}" == "server" ]; then + ./bin/rails db:prepare +fi + +exec "${@}" diff --git a/test/fixtures/deploy-rails-7.2/bin/importmap b/test/fixtures/deploy-rails-7.2/bin/importmap new file mode 100755 index 0000000000..36502ab16c --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/importmap @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby + +require_relative "../config/application" +require "importmap/commands" diff --git a/test/fixtures/deploy-rails-7.2/bin/rails b/test/fixtures/deploy-rails-7.2/bin/rails new file mode 100755 index 0000000000..efc0377492 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/rails @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +APP_PATH = File.expand_path("../config/application", __dir__) +require_relative "../config/boot" +require "rails/commands" diff --git a/test/fixtures/deploy-rails-7.2/bin/rake b/test/fixtures/deploy-rails-7.2/bin/rake new file mode 100755 index 0000000000..4fbf10b960 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/rake @@ -0,0 +1,4 @@ +#!/usr/bin/env ruby +require_relative "../config/boot" +require "rake" +Rake.application.run diff --git a/test/fixtures/deploy-rails-7.2/bin/rubocop b/test/fixtures/deploy-rails-7.2/bin/rubocop new file mode 100755 index 0000000000..40330c0ff1 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/rubocop @@ -0,0 +1,8 @@ +#!/usr/bin/env ruby +require "rubygems" +require "bundler/setup" + +# explicit rubocop config increases performance slightly while avoiding config confusion. +ARGV.unshift("--config", File.expand_path("../.rubocop.yml", __dir__)) + +load Gem.bin_path("rubocop", "rubocop") diff --git a/test/fixtures/deploy-rails-7.2/bin/setup b/test/fixtures/deploy-rails-7.2/bin/setup new file mode 100755 index 0000000000..44a5eb6f3f --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/bin/setup @@ -0,0 +1,37 @@ +#!/usr/bin/env ruby +require "fileutils" + +APP_ROOT = File.expand_path("..", __dir__) +APP_NAME = "deploy-rails-7" + +def system!(*args) + system(*args, exception: true) +end + +FileUtils.chdir APP_ROOT do + # This script is a way to set up or update your development environment automatically. + # This script is idempotent, so that you can run it at any time and get an expectable outcome. + # Add necessary setup steps to this file. + + puts "== Installing dependencies ==" + system! "gem install bundler --conservative" + system("bundle check") || system!("bundle install") + + # puts "\n== Copying sample files ==" + # unless File.exist?("config/database.yml") + # FileUtils.cp "config/database.yml.sample", "config/database.yml" + # end + + puts "\n== Preparing database ==" + system! "bin/rails db:prepare" + + puts "\n== Removing old logs and tempfiles ==" + system! "bin/rails log:clear tmp:clear" + + puts "\n== Restarting application server ==" + system! "bin/rails restart" + + # puts "\n== Configuring puma-dev ==" + # system "ln -nfs #{APP_ROOT} ~/.puma-dev/#{APP_NAME}" + # system "curl -Is https://#{APP_NAME}.test/up | head -n 1" +end diff --git a/test/fixtures/deploy-rails-7.2/config.ru b/test/fixtures/deploy-rails-7.2/config.ru new file mode 100644 index 0000000000..4a3c09a688 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config.ru @@ -0,0 +1,6 @@ +# This file is used by Rack-based servers to start the application. 
+ +require_relative "config/environment" + +run Rails.application +Rails.application.load_server diff --git a/test/fixtures/deploy-rails-7.2/config/application.rb b/test/fixtures/deploy-rails-7.2/config/application.rb new file mode 100644 index 0000000000..ccb8bfe349 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/application.rb @@ -0,0 +1,27 @@ +require_relative "boot" + +require "rails/all" + +# Require the gems listed in Gemfile, including any gems +# you've limited to :test, :development, or :production. +Bundler.require(*Rails.groups) + +module DeployRails7 + class Application < Rails::Application + # Initialize configuration defaults for originally generated Rails version. + config.load_defaults 7.2 + + # Please, add to the `ignore` list any other `lib` subdirectories that do + # not contain `.rb` files, or that should not be reloaded or eager loaded. + # Common ones are `templates`, `generators`, or `middleware`, for example. + config.autoload_lib(ignore: %w[assets tasks]) + + # Configuration for the application, engines, and railties goes here. + # + # These settings can be overridden in specific environments using the files + # in config/environments, which are processed later. + # + # config.time_zone = "Central Time (US & Canada)" + # config.eager_load_paths << Rails.root.join("extras") + end +end diff --git a/test/fixtures/deploy-rails-7.2/config/boot.rb b/test/fixtures/deploy-rails-7.2/config/boot.rb new file mode 100644 index 0000000000..988a5ddc46 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/boot.rb @@ -0,0 +1,4 @@ +ENV["BUNDLE_GEMFILE"] ||= File.expand_path("../Gemfile", __dir__) + +require "bundler/setup" # Set up gems listed in the Gemfile. +require "bootsnap/setup" # Speed up boot time by caching expensive operations. diff --git a/test/fixtures/deploy-rails-7.2/config/cable.yml b/test/fixtures/deploy-rails-7.2/config/cable.yml new file mode 100644 index 0000000000..ffacc2f647 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/cable.yml @@ -0,0 +1,10 @@ +development: + adapter: async + +test: + adapter: test + +production: + adapter: redis + url: <%= ENV.fetch("REDIS_URL") { "redis://localhost:6379/1" } %> + channel_prefix: deploy_rails_7_production diff --git a/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc b/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc new file mode 100644 index 0000000000..ecff7731dd --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc @@ -0,0 +1 @@ +Gkhk/4gkbLdSUZ+giBYulckjb5oo6CuR7BOEq/V9PPKMafhLLxxdky4lr1/lr4+hRIk4BjfcEGrxBwtMBUqewcIqRPg5WLQX1ti18Fzlp4LNNAkBwTK5t0dYl0/A/DbtJYlwrcazIG97mZIDubmr1cp2DAjk8gonRAmBGH9kuzod4ZFJGU+zewEU6i5elSrT0mins1tw2xlwS9MkY0DEgaXqmVvU0I6eQt5m2qpohA71kIgzAlwRaBSYIoBKpjHDMGQPemXAzl1AB0u6/x2w99ZnVRb7Pyk6RHNIctdtHXGpO0cWq4EYUv+UIzj4hWajDifZCErvxvgn7rwTiD83hIwhjguuVq+0MC+qkA900fRTUY60/tx8zeZe3JNnrQsXuklilg0MyFi0iiVK2FKskhrSTlMs--221rlrI+z1Au1fMN--+uSIAByfvLaNhpKuDyWaBQ== \ No newline at end of file diff --git a/test/fixtures/deploy-rails-7.2/config/database.yml b/test/fixtures/deploy-rails-7.2/config/database.yml new file mode 100644 index 0000000000..6d5cae3a19 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/database.yml @@ -0,0 +1,32 @@ +# SQLite. Versions 3.8.0 and up are supported. 
+# gem install sqlite3 +# +# Ensure the SQLite 3 gem is defined in your Gemfile +# gem "sqlite3" +# +default: &default + adapter: sqlite3 + pool: <%= ENV.fetch("RAILS_MAX_THREADS") { 5 } %> + timeout: 5000 + +development: + <<: *default + database: storage/development.sqlite3 + +# Warning: The database defined as "test" will be erased and +# re-generated from your development database when you run "rake". +# Do not set this db to the same as development or production. +test: + <<: *default + database: storage/test.sqlite3 + + +# SQLite3 write its data on the local filesystem, as such it requires +# persistent disks. If you are deploying to a managed service, you should +# make sure it provides disk persistence, as many don't. +# +# Similarly, if you deploy your application as a Docker container, you must +# ensure the database is located in a persisted volume. +production: + <<: *default + database: storage/production.sqlite3 diff --git a/test/fixtures/deploy-rails-7.2/config/environment.rb b/test/fixtures/deploy-rails-7.2/config/environment.rb new file mode 100644 index 0000000000..cac5315775 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/environment.rb @@ -0,0 +1,5 @@ +# Load the Rails application. +require_relative "application" + +# Initialize the Rails application. +Rails.application.initialize! diff --git a/test/fixtures/deploy-rails-7.2/config/environments/development.rb b/test/fixtures/deploy-rails-7.2/config/environments/development.rb new file mode 100644 index 0000000000..9b67360065 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/environments/development.rb @@ -0,0 +1,81 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # In the development environment your application's code is reloaded any time + # it changes. This slows down response time but is perfect for development + # since you don't have to restart the web server when you make code changes. + config.enable_reloading = true + + # Do not eager load code on boot. + config.eager_load = false + + # Show full error reports. + config.consider_all_requests_local = true + + # Enable server timing. + config.server_timing = true + + # Enable/disable caching. By default caching is disabled. + # Run rails dev:cache to toggle caching. + if Rails.root.join("tmp/caching-dev.txt").exist? + config.action_controller.perform_caching = true + config.action_controller.enable_fragment_cache_logging = true + + config.cache_store = :memory_store + config.public_file_server.headers = { "Cache-Control" => "public, max-age=#{2.days.to_i}" } + else + config.action_controller.perform_caching = false + + config.cache_store = :null_store + end + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Don't care if the mailer can't send. + config.action_mailer.raise_delivery_errors = false + + # Disable caching for Action Mailer templates even if Action Controller + # caching is enabled. + config.action_mailer.perform_caching = false + + config.action_mailer.default_url_options = { host: "localhost", port: 3000 } + + # Print deprecation notices to the Rails logger. + config.active_support.deprecation = :log + + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. 
+ config.active_support.disallowed_deprecation_warnings = [] + + # Raise an error on page load if there are pending migrations. + config.active_record.migration_error = :page_load + + # Highlight code that triggered database queries in logs. + config.active_record.verbose_query_logs = true + + # Highlight code that enqueued background job in logs. + config.active_job.verbose_enqueue_logs = true + + # Suppress logger output for asset requests. + config.assets.quiet = true + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + config.action_view.annotate_rendered_view_with_filenames = true + + # Uncomment if you wish to allow Action Cable access from any origin. + # config.action_cable.disable_request_forgery_protection = true + + # Raise error when a before_action's only/except options reference missing actions. + config.action_controller.raise_on_missing_callback_actions = true + + # Apply autocorrection by RuboCop to files generated by `bin/rails generate`. + # config.generators.apply_rubocop_autocorrect_after_generate! +end diff --git a/test/fixtures/deploy-rails-7.2/config/environments/production.rb b/test/fixtures/deploy-rails-7.2/config/environments/production.rb new file mode 100644 index 0000000000..e738bb655e --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/environments/production.rb @@ -0,0 +1,102 @@ +require "active_support/core_ext/integer/time" + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # Code is not reloaded between requests. + config.enable_reloading = false + + # Eager load code on boot. This eager loads most of Rails and + # your application in memory, allowing both threaded web servers + # and those relying on copy on write to perform better. + # Rake tasks automatically ignore this option for performance. + config.eager_load = true + + # Full error reports are disabled and caching is turned on. + config.consider_all_requests_local = false + config.action_controller.perform_caching = true + + # Ensures that a master key has been made available in ENV["RAILS_MASTER_KEY"], config/master.key, or an environment + # key such as config/credentials/production.key. This key is used to decrypt credentials (and other encrypted files). + # config.require_master_key = true + + # Disable serving static files from `public/`, relying on NGINX/Apache to do so instead. + # config.public_file_server.enabled = false + + # Compress CSS using a preprocessor. + # config.assets.css_compressor = :sass + + # Do not fall back to assets pipeline if a precompiled asset is missed. + config.assets.compile = false + + # Enable serving of images, stylesheets, and JavaScripts from an asset server. + # config.asset_host = "http://assets.example.com" + + # Specifies the header that your server uses for sending files. + # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for Apache + # config.action_dispatch.x_sendfile_header = "X-Accel-Redirect" # for NGINX + + # Store uploaded files on the local file system (see config/storage.yml for options). + config.active_storage.service = :local + + # Mount Action Cable outside main process or domain. + # config.action_cable.mount_path = nil + # config.action_cable.url = "wss://example.com/cable" + # config.action_cable.allowed_request_origins = [ "http://example.com", /http:\/\/example.*/ ] + + # Assume all access to the app is happening through a SSL-terminating reverse proxy. 
+ # Can be used together with config.force_ssl for Strict-Transport-Security and secure cookies. + # config.assume_ssl = true + + # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. + config.force_ssl = true + + # Skip http-to-https redirect for the default health check endpoint. + # config.ssl_options = { redirect: { exclude: ->(request) { request.path == "/up" } } } + + # Log to STDOUT by default + config.logger = ActiveSupport::Logger.new(STDOUT) + .tap { |logger| logger.formatter = ::Logger::Formatter.new } + .then { |logger| ActiveSupport::TaggedLogging.new(logger) } + + # Prepend all log lines with the following tags. + config.log_tags = [ :request_id ] + + # "info" includes generic and useful information about system operation, but avoids logging too much + # information to avoid inadvertent exposure of personally identifiable information (PII). If you + # want to log everything, set the level to "debug". + config.log_level = ENV.fetch("RAILS_LOG_LEVEL", "info") + + # Use a different cache store in production. + # config.cache_store = :mem_cache_store + + # Use a real queuing backend for Active Job (and separate queues per environment). + # config.active_job.queue_adapter = :resque + # config.active_job.queue_name_prefix = "deploy_rails_7_production" + + # Disable caching for Action Mailer templates even if Action Controller + # caching is enabled. + config.action_mailer.perform_caching = false + + # Ignore bad email addresses and do not raise email delivery errors. + # Set this to true and configure the email server for immediate delivery to raise delivery errors. + # config.action_mailer.raise_delivery_errors = false + + # Enable locale fallbacks for I18n (makes lookups for any locale fall back to + # the I18n.default_locale when a translation cannot be found). + config.i18n.fallbacks = true + + # Don't log any deprecations. + config.active_support.report_deprecations = false + + # Do not dump schema after migrations. + config.active_record.dump_schema_after_migration = false + + # Enable DNS rebinding protection and other `Host` header attacks. + # config.hosts = [ + # "example.com", # Allow requests from example.com + # /.*\.example\.com/ # Allow requests from subdomains like `www.example.com` + # ] + # Skip DNS rebinding protection for the default health check endpoint. + # config.host_authorization = { exclude: ->(request) { request.path == "/up" } } +end diff --git a/test/fixtures/deploy-rails-7.2/config/environments/test.rb b/test/fixtures/deploy-rails-7.2/config/environments/test.rb new file mode 100644 index 0000000000..0c616a1bf5 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/environments/test.rb @@ -0,0 +1,67 @@ +require "active_support/core_ext/integer/time" + +# The test environment is used exclusively to run your application's +# test suite. You never need to work with it otherwise. Remember that +# your test database is "scratch space" for the test suite and is wiped +# and recreated between test runs. Don't rely on the data there! + +Rails.application.configure do + # Settings specified here will take precedence over those in config/application.rb. + + # While tests run files are not watched, reloading is not necessary. + config.enable_reloading = false + + # Eager loading loads your entire application. When running a single test locally, + # this is usually not necessary, and can slow down your test suite. 
However, it's + # recommended that you enable it in continuous integration systems to ensure eager + # loading is working properly before deploying your code. + config.eager_load = ENV["CI"].present? + + # Configure public file server for tests with Cache-Control for performance. + config.public_file_server.headers = { "Cache-Control" => "public, max-age=#{1.hour.to_i}" } + + # Show full error reports and disable caching. + config.consider_all_requests_local = true + config.action_controller.perform_caching = false + config.cache_store = :null_store + + # Render exception templates for rescuable exceptions and raise for other exceptions. + config.action_dispatch.show_exceptions = :rescuable + + # Disable request forgery protection in test environment. + config.action_controller.allow_forgery_protection = false + + # Store uploaded files on the local file system in a temporary directory. + config.active_storage.service = :test + + # Disable caching for Action Mailer templates even if Action Controller + # caching is enabled. + config.action_mailer.perform_caching = false + + # Tell Action Mailer not to deliver emails to the real world. + # The :test delivery method accumulates sent emails in the + # ActionMailer::Base.deliveries array. + config.action_mailer.delivery_method = :test + + # Unlike controllers, the mailer instance doesn't have any context about the + # incoming request so you'll need to provide the :host parameter yourself. + config.action_mailer.default_url_options = { host: "www.example.com" } + + # Print deprecation notices to the stderr. + config.active_support.deprecation = :stderr + + # Raise exceptions for disallowed deprecations. + config.active_support.disallowed_deprecation = :raise + + # Tell Active Support which deprecation messages to disallow. + config.active_support.disallowed_deprecation_warnings = [] + + # Raises error for missing translations. + # config.i18n.raise_on_missing_translations = true + + # Annotate rendered view with file names. + # config.action_view.annotate_rendered_view_with_filenames = true + + # Raise error when a before_action's only/except options reference missing actions. + config.action_controller.raise_on_missing_callback_actions = true +end diff --git a/test/fixtures/deploy-rails-7.2/config/importmap.rb b/test/fixtures/deploy-rails-7.2/config/importmap.rb new file mode 100644 index 0000000000..909dfc542d --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/importmap.rb @@ -0,0 +1,7 @@ +# Pin npm packages by running ./bin/importmap + +pin "application" +pin "@hotwired/turbo-rails", to: "turbo.min.js" +pin "@hotwired/stimulus", to: "stimulus.min.js" +pin "@hotwired/stimulus-loading", to: "stimulus-loading.js" +pin_all_from "app/javascript/controllers", under: "controllers" diff --git a/test/fixtures/deploy-rails-7.2/config/initializers/assets.rb b/test/fixtures/deploy-rails-7.2/config/initializers/assets.rb new file mode 100644 index 0000000000..bd5bcd2b6a --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/initializers/assets.rb @@ -0,0 +1,12 @@ +# Be sure to restart your server when you modify this file. + +# Version of your assets, change this if you want to expire all your assets. +Rails.application.config.assets.version = "1.0" + +# Add additional assets to the asset load path. +# Rails.application.config.assets.paths << Emoji.images_path + +# Precompile additional assets. +# application.js, application.css, and all non-JS/CSS in the app/assets +# folder are already added. 
+# Rails.application.config.assets.precompile += %w[ admin.js admin.css ] diff --git a/test/fixtures/deploy-rails-7.2/config/initializers/content_security_policy.rb b/test/fixtures/deploy-rails-7.2/config/initializers/content_security_policy.rb new file mode 100644 index 0000000000..b3076b38fe --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/initializers/content_security_policy.rb @@ -0,0 +1,25 @@ +# Be sure to restart your server when you modify this file. + +# Define an application-wide content security policy. +# See the Securing Rails Applications Guide for more information: +# https://guides.rubyonrails.org/security.html#content-security-policy-header + +# Rails.application.configure do +# config.content_security_policy do |policy| +# policy.default_src :self, :https +# policy.font_src :self, :https, :data +# policy.img_src :self, :https, :data +# policy.object_src :none +# policy.script_src :self, :https +# policy.style_src :self, :https +# # Specify URI for violation reports +# # policy.report_uri "/csp-violation-report-endpoint" +# end +# +# # Generate session nonces for permitted importmap, inline scripts, and inline styles. +# config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s } +# config.content_security_policy_nonce_directives = %w(script-src style-src) +# +# # Report violations without enforcing the policy. +# # config.content_security_policy_report_only = true +# end diff --git a/test/fixtures/deploy-rails-7.2/config/initializers/filter_parameter_logging.rb b/test/fixtures/deploy-rails-7.2/config/initializers/filter_parameter_logging.rb new file mode 100644 index 0000000000..c010b83ddd --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/initializers/filter_parameter_logging.rb @@ -0,0 +1,8 @@ +# Be sure to restart your server when you modify this file. + +# Configure parameters to be partially matched (e.g. passw matches password) and filtered from the log file. +# Use this to limit dissemination of sensitive information. +# See the ActiveSupport::ParameterFilter documentation for supported notations and behaviors. +Rails.application.config.filter_parameters += [ + :passw, :email, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn +] diff --git a/test/fixtures/deploy-rails-7.2/config/initializers/inflections.rb b/test/fixtures/deploy-rails-7.2/config/initializers/inflections.rb new file mode 100644 index 0000000000..3860f659ea --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/initializers/inflections.rb @@ -0,0 +1,16 @@ +# Be sure to restart your server when you modify this file. + +# Add new inflection rules using the following format. Inflections +# are locale specific, and you may define rules for as many different +# locales as you wish. 
All of these examples are active by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.plural /^(ox)$/i, "\\1en" +# inflect.singular /^(ox)en/i, "\\1" +# inflect.irregular "person", "people" +# inflect.uncountable %w( fish sheep ) +# end + +# These inflection rules are supported but not enabled by default: +# ActiveSupport::Inflector.inflections(:en) do |inflect| +# inflect.acronym "RESTful" +# end diff --git a/test/fixtures/deploy-rails-7.2/config/initializers/permissions_policy.rb b/test/fixtures/deploy-rails-7.2/config/initializers/permissions_policy.rb new file mode 100644 index 0000000000..7db3b9577e --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/initializers/permissions_policy.rb @@ -0,0 +1,13 @@ +# Be sure to restart your server when you modify this file. + +# Define an application-wide HTTP permissions policy. For further +# information see: https://developers.google.com/web/updates/2018/06/feature-policy + +# Rails.application.config.permissions_policy do |policy| +# policy.camera :none +# policy.gyroscope :none +# policy.microphone :none +# policy.usb :none +# policy.fullscreen :self +# policy.payment :self, "https://secure.example.com" +# end diff --git a/test/fixtures/deploy-rails-7.2/config/locales/en.yml b/test/fixtures/deploy-rails-7.2/config/locales/en.yml new file mode 100644 index 0000000000..6c349ae5e3 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/locales/en.yml @@ -0,0 +1,31 @@ +# Files in the config/locales directory are used for internationalization and +# are automatically loaded by Rails. If you want to use locales other than +# English, add the necessary files in this directory. +# +# To use the locales, use `I18n.t`: +# +# I18n.t "hello" +# +# In views, this is aliased to just `t`: +# +# <%= t("hello") %> +# +# To use a different locale, set it with `I18n.locale`: +# +# I18n.locale = :es +# +# This would use the information in config/locales/es.yml. +# +# To learn more about the API, please read the Rails Internationalization guide +# at https://guides.rubyonrails.org/i18n.html. +# +# Be aware that YAML interprets the following case-insensitive strings as +# booleans: `true`, `false`, `on`, `off`, `yes`, `no`. Therefore, these strings +# must be quoted to be interpreted as strings. For example: +# +# en: +# "yes": yup +# enabled: "ON" + +en: + hello: "Hello world" diff --git a/test/fixtures/deploy-rails-7.2/config/puma.rb b/test/fixtures/deploy-rails-7.2/config/puma.rb new file mode 100644 index 0000000000..03c166f4cf --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/puma.rb @@ -0,0 +1,34 @@ +# This configuration file will be evaluated by Puma. The top-level methods that +# are invoked here are part of Puma's configuration DSL. For more information +# about methods provided by the DSL, see https://puma.io/puma/Puma/DSL.html. + +# Puma starts a configurable number of processes (workers) and each process +# serves each request in a thread from an internal thread pool. +# +# The ideal number of threads per worker depends both on how much time the +# application spends waiting for IO operations and on how much you wish to +# to prioritize throughput over latency. +# +# As a rule of thumb, increasing the number of threads will increase how much +# traffic a given process can handle (throughput), but due to CRuby's +# Global VM Lock (GVL) it has diminishing returns and will degrade the +# response time (latency) of the application. 
+# +# The default is set to 3 threads as it's deemed a decent compromise between +# throughput and latency for the average Rails application. +# +# Any libraries that use a connection pool or another resource pool should +# be configured to provide at least as many connections as the number of +# threads. This includes Active Record's `pool` parameter in `database.yml`. +threads_count = ENV.fetch("RAILS_MAX_THREADS", 3) +threads threads_count, threads_count + +# Specifies the `port` that Puma will listen on to receive requests; default is 3000. +port ENV.fetch("PORT", 3000) + +# Allow puma to be restarted by `bin/rails restart` command. +plugin :tmp_restart + +# Specify the PID file. Defaults to tmp/pids/server.pid in development. +# In other environments, only set the PID file if requested. +pidfile ENV["PIDFILE"] if ENV["PIDFILE"] diff --git a/test/fixtures/deploy-rails-7.2/config/routes.rb b/test/fixtures/deploy-rails-7.2/config/routes.rb new file mode 100644 index 0000000000..33c9639036 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/routes.rb @@ -0,0 +1,14 @@ +Rails.application.routes.draw do + # Define your application routes per the DSL in https://guides.rubyonrails.org/routing.html + + # Reveal health status on /up that returns 200 if the app boots with no exceptions, otherwise 500. + # Can be used by load balancers and uptime monitors to verify that the app is live. + get "up" => "rails/health#show", as: :rails_health_check + + # Render dynamic PWA files from app/views/pwa/* + get "service-worker" => "rails/pwa#service_worker", as: :pwa_service_worker + get "manifest" => "rails/pwa#manifest", as: :pwa_manifest + + # Defines the root path route ("/") + # root "posts#index" +end diff --git a/test/fixtures/deploy-rails-7.2/config/storage.yml b/test/fixtures/deploy-rails-7.2/config/storage.yml new file mode 100644 index 0000000000..4942ab6694 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/config/storage.yml @@ -0,0 +1,34 @@ +test: + service: Disk + root: <%= Rails.root.join("tmp/storage") %> + +local: + service: Disk + root: <%= Rails.root.join("storage") %> + +# Use bin/rails credentials:edit to set the AWS secrets (as aws:access_key_id|secret_access_key) +# amazon: +# service: S3 +# access_key_id: <%= Rails.application.credentials.dig(:aws, :access_key_id) %> +# secret_access_key: <%= Rails.application.credentials.dig(:aws, :secret_access_key) %> +# region: us-east-1 +# bucket: your_own_bucket-<%= Rails.env %> + +# Remember not to checkin your GCS keyfile to a repository +# google: +# service: GCS +# project: your_project +# credentials: <%= Rails.root.join("path/to/gcs.keyfile") %> +# bucket: your_own_bucket-<%= Rails.env %> + +# Use bin/rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key) +# microsoft: +# service: AzureStorage +# storage_account_name: your_account_name +# storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %> +# container: your_container_name-<%= Rails.env %> + +# mirror: +# service: Mirror +# primary: local +# mirrors: [ amazon, google, microsoft ] diff --git a/test/fixtures/deploy-rails-7.2/db/seeds.rb b/test/fixtures/deploy-rails-7.2/db/seeds.rb new file mode 100644 index 0000000000..4fbd6ed970 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/db/seeds.rb @@ -0,0 +1,9 @@ +# This file should ensure the existence of records required to run the application in every environment (production, +# development, test). 
The code here should be idempotent so that it can be executed at any point in every environment. +# The data can then be loaded with the bin/rails db:seed command (or created alongside the database with db:setup). +# +# Example: +# +# ["Action", "Comedy", "Drama", "Horror"].each do |genre_name| +# MovieGenre.find_or_create_by!(name: genre_name) +# end diff --git a/test/fixtures/deploy-rails-7.2/lib/assets/.keep b/test/fixtures/deploy-rails-7.2/lib/assets/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/lib/tasks/.keep b/test/fixtures/deploy-rails-7.2/lib/tasks/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/log/.keep b/test/fixtures/deploy-rails-7.2/log/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/public/404.html b/test/fixtures/deploy-rails-7.2/public/404.html new file mode 100644 index 0000000000..2be3af26fc --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/404.html @@ -0,0 +1,67 @@ + + + + The page you were looking for doesn't exist (404) + + + + + + +
+      The page you were looking for doesn't exist.
+      You may have mistyped the address or the page may have moved.
+      If you are the application owner check the logs for more information.
+ + diff --git a/test/fixtures/deploy-rails-7.2/public/406-unsupported-browser.html b/test/fixtures/deploy-rails-7.2/public/406-unsupported-browser.html new file mode 100644 index 0000000000..7cf1e168e6 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/406-unsupported-browser.html @@ -0,0 +1,66 @@ + + + + Your browser is not supported (406) + + + + + + +
+      Your browser is not supported.
+      Please upgrade your browser to continue.
+ + diff --git a/test/fixtures/deploy-rails-7.2/public/422.html b/test/fixtures/deploy-rails-7.2/public/422.html new file mode 100644 index 0000000000..c08eac0d1d --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/422.html @@ -0,0 +1,67 @@ + + + + The change you wanted was rejected (422) + + + + + + +
+      The change you wanted was rejected.
+      Maybe you tried to change something you didn't have access to.
+      If you are the application owner check the logs for more information.
+ + diff --git a/test/fixtures/deploy-rails-7.2/public/500.html b/test/fixtures/deploy-rails-7.2/public/500.html new file mode 100644 index 0000000000..78a030af22 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/500.html @@ -0,0 +1,66 @@ + + + + We're sorry, but something went wrong (500) + + + + + + +
+      We're sorry, but something went wrong.
+      If you are the application owner check the logs for more information.
+ + diff --git a/test/fixtures/deploy-rails-7.2/public/icon.png b/test/fixtures/deploy-rails-7.2/public/icon.png new file mode 100644 index 0000000000000000000000000000000000000000..f3b5abcbde91cf6d7a6a26e514eb7e30f476f950 GIT binary patch literal 5599 zcmeHL-D}fO6hCR_taXJlzs3}~RuB=Iujyo=i*=1|1FN%E=zNfMTjru|Q<6v{J{U!C zBEE}?j6I3sz>fzN!6}L_BKjcuASk~1;Dg|U_@d{g?V8mM`~#9U+>>*Ezw>c(PjYWA z4(;!cgge6k5E&d$G5`S-0}!Ik>CV(0Y#1}s-v_gAHhja2=W1?nBAte9D2HG<(+)uj z!5=W4u*{VKMw#{V@^NNs4TClr!FAA%ID-*gc{R%CFKEzG<6gm*9s_uy)oMGW*=nJf zw{(Mau|2FHfXIv6C0@Wk5k)F=3jo1srV-C{pl&k&)4_&JjYrnbJiul}d0^NCSh(#7h=F;3{|>EU>h z6U8_p;^wK6mAB(1b92>5-HxJ~V}@3?G`&Qq-TbJ2(&~-HsH6F#8mFaAG(45eT3VPO zM|(Jd<+;UZs;w>0Qw}0>D%{~r{uo_Fl5_Bo3ABWi zWo^j^_T3dxG6J6fH8X)$a^%TJ#PU!=LxF=#Fd9EvKx_x>q<(KY%+y-08?kN9dXjXK z**Q=yt-FTU*13ouhCdqq-0&;Ke{T3sQU9IdzhV9LhQIpq*P{N)+}|Mh+a-VV=x?R} c>%+pvTcMWshj-umO}|qP?%A)*_KlqT3uEqhU;qFB literal 0 HcmV?d00001 diff --git a/test/fixtures/deploy-rails-7.2/public/icon.svg b/test/fixtures/deploy-rails-7.2/public/icon.svg new file mode 100644 index 0000000000..78307ccd4b --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/icon.svg @@ -0,0 +1,3 @@ + + + diff --git a/test/fixtures/deploy-rails-7.2/public/robots.txt b/test/fixtures/deploy-rails-7.2/public/robots.txt new file mode 100644 index 0000000000..c19f78ab68 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/public/robots.txt @@ -0,0 +1 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file diff --git a/test/fixtures/deploy-rails-7.2/storage/.keep b/test/fixtures/deploy-rails-7.2/storage/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/application_system_test_case.rb b/test/fixtures/deploy-rails-7.2/test/application_system_test_case.rb new file mode 100644 index 0000000000..cee29fd214 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/test/application_system_test_case.rb @@ -0,0 +1,5 @@ +require "test_helper" + +class ApplicationSystemTestCase < ActionDispatch::SystemTestCase + driven_by :selenium, using: :headless_chrome, screen_size: [ 1400, 1400 ] +end diff --git a/test/fixtures/deploy-rails-7.2/test/channels/application_cable/connection_test.rb b/test/fixtures/deploy-rails-7.2/test/channels/application_cable/connection_test.rb new file mode 100644 index 0000000000..6340bf9c04 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/test/channels/application_cable/connection_test.rb @@ -0,0 +1,13 @@ +require "test_helper" + +module ApplicationCable + class ConnectionTest < ActionCable::Connection::TestCase + # test "connects with cookies" do + # cookies.signed[:user_id] = 42 + # + # connect + # + # assert_equal connection.user_id, "42" + # end + end +end diff --git a/test/fixtures/deploy-rails-7.2/test/controllers/.keep b/test/fixtures/deploy-rails-7.2/test/controllers/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/fixtures/files/.keep b/test/fixtures/deploy-rails-7.2/test/fixtures/files/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/helpers/.keep b/test/fixtures/deploy-rails-7.2/test/helpers/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/integration/.keep b/test/fixtures/deploy-rails-7.2/test/integration/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/mailers/.keep b/test/fixtures/deploy-rails-7.2/test/mailers/.keep new file 
mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/models/.keep b/test/fixtures/deploy-rails-7.2/test/models/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/system/.keep b/test/fixtures/deploy-rails-7.2/test/system/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/test/test_helper.rb b/test/fixtures/deploy-rails-7.2/test/test_helper.rb new file mode 100644 index 0000000000..0c22470ec1 --- /dev/null +++ b/test/fixtures/deploy-rails-7.2/test/test_helper.rb @@ -0,0 +1,15 @@ +ENV["RAILS_ENV"] ||= "test" +require_relative "../config/environment" +require "rails/test_help" + +module ActiveSupport + class TestCase + # Run tests in parallel with specified workers + parallelize(workers: :number_of_processors) + + # Setup all fixtures in test/fixtures/*.yml for all tests in alphabetical order. + fixtures :all + + # Add more helper methods to be used by all tests here... + end +end diff --git a/test/fixtures/deploy-rails-7.2/vendor/.keep b/test/fixtures/deploy-rails-7.2/vendor/.keep new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/deploy-rails-7.2/vendor/javascript/.keep b/test/fixtures/deploy-rails-7.2/vendor/javascript/.keep new file mode 100644 index 0000000000..e69de29bb2 From c0c19830630616f5c559ff01831aa41cf49c8b29 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 21 Oct 2024 14:14:36 -0400 Subject: [PATCH 079/104] ensure flushing of stdout and stderr before exit --- deploy.rb | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/deploy.rb b/deploy.rb index 907389a33a..2eaa32d6b2 100755 --- a/deploy.rb +++ b/deploy.rb @@ -5,6 +5,8 @@ require './deploy/common' +begin + event :start, { ts: ts() } # Change to a directory where we'll pull on git @@ -433,4 +435,9 @@ end end -event :end, { ts: ts() } \ No newline at end of file +event :end, { ts: ts() } + +ensure + $stdout.flush + $stderr.flush +end \ No newline at end of file From 2b205e0a32a8c19732bcc765d8e0238344c4b137 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Mon, 21 Oct 2024 15:51:12 -0400 Subject: [PATCH 080/104] sleep 1 second after flushing just to be sure all log lines are ingested --- deploy.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/deploy.rb b/deploy.rb index 2eaa32d6b2..077189f1a4 100755 --- a/deploy.rb +++ b/deploy.rb @@ -440,4 +440,6 @@ ensure $stdout.flush $stderr.flush + + sleep 1.0 end \ No newline at end of file From 7e16e264b7388fc552d79c3272da16d1d9f0e531 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 22 Oct 2024 09:08:43 -0400 Subject: [PATCH 081/104] Add node yarn test and fix yarn installation for node > 18 (#4023) add node yarn app and fix yarn installation for node > 18 --- scanner/node.go | 3 +- scanner/templates/node/Dockerfile | 2 +- test/deployer/deployer_test.go | 16 + test/fixtures/deploy-node-yarn/package.json | 5 + test/fixtures/deploy-node-yarn/server.js | 14 + test/fixtures/deploy-node-yarn/yarn.lock | 321 ++++++++++++++++++++ 6 files changed, 359 insertions(+), 2 deletions(-) create mode 100644 test/fixtures/deploy-node-yarn/package.json create mode 100644 test/fixtures/deploy-node-yarn/server.js create mode 100644 test/fixtures/deploy-node-yarn/yarn.lock diff --git a/scanner/node.go b/scanner/node.go index 85804b2353..e9c0dac1eb 100644 --- a/scanner/node.go +++ b/scanner/node.go @@ -79,7 +79,8 @@ func configureNode(sourceDir string, config *ScannerConfig) 
(*SourceInfo, error) package_files := []string{"package.json"} _, err = os.Stat("yarn.lock") - vars["yarn"] = !os.IsNotExist(err) + // install yarn if there's a yarn.lock and if nodejs version is under 18 + vars["yarn"] = !os.IsNotExist(err) && nodeVersion < "18" if os.IsNotExist(err) { vars["packager"] = "npm" diff --git a/scanner/templates/node/Dockerfile b/scanner/templates/node/Dockerfile index 44eea37687..a707442942 100644 --- a/scanner/templates/node/Dockerfile +++ b/scanner/templates/node/Dockerfile @@ -14,7 +14,7 @@ ENV NODE_ENV=production {{ if .yarn -}} ARG YARN_VERSION={{ .yarnVersion }} -RUN npm install -g yarn@$YARN_VERSION +RUN npm install --force -g yarn@$YARN_VERSION {{ end }} # Throw-away build stage to reduce size of final image diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 4efaf01fa2..bab9f81461 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -36,6 +36,22 @@ func TestDeployBasicNode(t *testing.T) { require.Contains(t, string(body), fmt.Sprintf("Hello, World! %s", deploy.Extra["TEST_ID"].(string))) } +func TestLaunchBasicNodeYarn(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-node-yarn"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + ) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) + require.NoError(t, err) + + require.Contains(t, string(body), "Hello World") +} + func TestDeployBasicNodeWithCustomConfigPath(t *testing.T) { deploy := testDeployer(t, withCustomFlyTomlPath("custom-fly-config.toml"), diff --git a/test/fixtures/deploy-node-yarn/package.json b/test/fixtures/deploy-node-yarn/package.json new file mode 100644 index 0000000000..f9c236da6d --- /dev/null +++ b/test/fixtures/deploy-node-yarn/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "request": "^2.88.2" + } +} diff --git a/test/fixtures/deploy-node-yarn/server.js b/test/fixtures/deploy-node-yarn/server.js new file mode 100644 index 0000000000..79120d7042 --- /dev/null +++ b/test/fixtures/deploy-node-yarn/server.js @@ -0,0 +1,14 @@ +const { createServer } = require('node:http'); + +const hostname = '0.0.0.0'; +const port = process.env.PORT || '8080'; + +const server = createServer((req, res) => { + res.statusCode = 200; + res.setHeader('Content-Type', 'text/plain'); + res.end('Hello World'); +}); + +server.listen(port, hostname, () => { + console.log(`Server running at http://${hostname}:${port}/`); +}); diff --git a/test/fixtures/deploy-node-yarn/yarn.lock b/test/fixtures/deploy-node-yarn/yarn.lock new file mode 100644 index 0000000000..85a16a7924 --- /dev/null +++ b/test/fixtures/deploy-node-yarn/yarn.lock @@ -0,0 +1,321 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +ajv@^6.12.3: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +asn1@~0.2.3: + version "0.2.6" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.6.tgz#0d3a7bb6e64e02a90c0303b31f292868ea09a08d" + integrity sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ== + dependencies: + safer-buffer "~2.1.0" + +assert-plus@1.0.0, assert-plus@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" + integrity sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== + +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" + integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== + +aws4@^1.8.0: + version "1.13.2" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.13.2.tgz#0aa167216965ac9474ccfa83892cfb6b3e1e52ef" + integrity sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw== + +bcrypt-pbkdf@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" + integrity sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w== + dependencies: + tweetnacl "^0.14.3" + +caseless@~0.12.0: + version "0.12.0" + resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" + integrity sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw== + +combined-stream@^1.0.6, combined-stream@~1.0.6: + version "1.0.8" + resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +core-util-is@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" + integrity sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ== + +dashdash@^1.12.0: + version "1.14.1" + resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" + integrity sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g== + dependencies: + assert-plus "^1.0.0" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== + +ecc-jsbn@~0.1.1: + version "0.1.2" + 
resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" + integrity sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw== + dependencies: + jsbn "~0.1.0" + safer-buffer "^2.1.0" + +extend@~3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" + integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== + +extsprintf@1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" + integrity sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g== + +extsprintf@^1.2.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.1.tgz#8d172c064867f235c0c84a596806d279bf4bcc07" + integrity sha512-Wrk35e8ydCKDj/ArClo1VrPVmN8zph5V4AtHwIuHhvMXsKf73UT3BOD+azBIW+3wOJ4FhEH7zyaJCFvChjYvMA== + +fast-deep-equal@^3.1.1: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +forever-agent@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" + integrity sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw== + +form-data@~2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" + integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.6" + mime-types "^2.1.12" + +getpass@^0.1.1: + version "0.1.7" + resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" + integrity sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng== + dependencies: + assert-plus "^1.0.0" + +har-schema@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" + integrity sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q== + +har-validator@~5.1.3: + version "5.1.5" + resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.5.tgz#1f0803b9f8cb20c0fa13822df1ecddb36bde1efd" + integrity sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w== + dependencies: + ajv "^6.12.3" + har-schema "^2.0.0" + +http-signature@~1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" + integrity sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ== + dependencies: + assert-plus "^1.0.0" + jsprim "^1.2.2" + sshpk "^1.7.0" + +is-typedarray@~1.0.0: + version "1.0.0" 
+ resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" + integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA== + +isstream@~0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" + integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== + +jsbn@~0.1.0: + version "0.1.1" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" + integrity sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-schema@0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.4.0.tgz#f7de4cf6efab838ebaeb3236474cbba5a1930ab5" + integrity sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA== + +json-stringify-safe@~5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" + integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== + +jsprim@^1.2.2: + version "1.4.2" + resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.2.tgz#712c65533a15c878ba59e9ed5f0e26d5b77c5feb" + integrity sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw== + dependencies: + assert-plus "1.0.0" + extsprintf "1.3.0" + json-schema "0.4.0" + verror "1.10.0" + +mime-db@1.52.0: + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.12, mime-types@~2.1.19: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +oauth-sign@~0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" + integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== + +performance-now@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" + integrity sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow== + +psl@^1.1.28: + version "1.9.0" + resolved "https://registry.yarnpkg.com/psl/-/psl-1.9.0.tgz#d0df2a137f00794565fcaf3b2c00cd09f8d5a5a7" + integrity sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag== + +punycode@^2.1.0, punycode@^2.1.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity 
sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== + +qs@~6.5.2: + version "6.5.3" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.3.tgz#3aeeffc91967ef6e35c0e488ef46fb296ab76aad" + integrity sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA== + +request@^2.88.2: + version "2.88.2" + resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" + integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== + dependencies: + aws-sign2 "~0.7.0" + aws4 "^1.8.0" + caseless "~0.12.0" + combined-stream "~1.0.6" + extend "~3.0.2" + forever-agent "~0.6.1" + form-data "~2.3.2" + har-validator "~5.1.3" + http-signature "~1.2.0" + is-typedarray "~1.0.0" + isstream "~0.1.2" + json-stringify-safe "~5.0.1" + mime-types "~2.1.19" + oauth-sign "~0.9.0" + performance-now "^2.1.0" + qs "~6.5.2" + safe-buffer "^5.1.2" + tough-cookie "~2.5.0" + tunnel-agent "^0.6.0" + uuid "^3.3.2" + +safe-buffer@^5.0.1, safe-buffer@^5.1.2: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: + version "2.1.2" + resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +sshpk@^1.7.0: + version "1.18.0" + resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.18.0.tgz#1663e55cddf4d688b86a46b77f0d5fe363aba028" + integrity sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ== + dependencies: + asn1 "~0.2.3" + assert-plus "^1.0.0" + bcrypt-pbkdf "^1.0.0" + dashdash "^1.12.0" + ecc-jsbn "~0.1.1" + getpass "^0.1.1" + jsbn "~0.1.0" + safer-buffer "^2.0.2" + tweetnacl "~0.14.0" + +tough-cookie@~2.5.0: + version "2.5.0" + resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" + integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== + dependencies: + psl "^1.1.28" + punycode "^2.1.1" + +tunnel-agent@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" + integrity sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w== + dependencies: + safe-buffer "^5.0.1" + +tweetnacl@^0.14.3, tweetnacl@~0.14.0: + version "0.14.5" + resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" + integrity sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA== + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +uuid@^3.3.2: + version "3.4.0" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" + integrity 
sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== + +verror@1.10.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" + integrity sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw== + dependencies: + assert-plus "^1.0.0" + core-util-is "1.0.2" + extsprintf "^1.2.0" From 2c8bb4c8ba64ad59104463299b7d1c6a38af2cdc Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 22 Oct 2024 11:26:03 -0400 Subject: [PATCH 082/104] removing a few warnings that don't apply to deployers --- internal/command/launch/plan_builder.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/internal/command/launch/plan_builder.go b/internal/command/launch/plan_builder.go index d8a9c487e3..11b1262b06 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -104,8 +104,6 @@ func (r *recoverableErrorBuilder) build() string { } func buildManifest(ctx context.Context, parentConfig *appconfig.Config, recoverableErrors *recoverableErrorBuilder) (*LaunchManifest, *planBuildCache, error) { - io := iostreams.FromContext(ctx) - appConfig, copiedConfig, err := determineBaseAppConfig(ctx) if err != nil { return nil, nil, err @@ -133,12 +131,6 @@ func buildManifest(ctx context.Context, parentConfig *appconfig.Config, recovera if err := appConfig.SetMachinesPlatform(); err != nil { return nil, nil, fmt.Errorf("can not use configuration for Fly Launch, check fly.toml: %w", err) } - if flag.GetBool(ctx, "manifest") { - fmt.Fprintln(io.ErrOut, - "Warning: --manifest does not serialize an entire app configuration.\n"+ - "Creating a manifest from an existing fly.toml may be a lossy process!", - ) - } if service := appConfig.HTTPService; service != nil { httpServicePort = service.InternalPort } else { @@ -423,7 +415,7 @@ func determineBaseAppConfig(ctx context.Context) (*appconfig.Config, bool, error existingConfig := appconfig.ConfigFromContext(ctx) if existingConfig != nil { - if existingConfig.AppName != "" { + if existingConfig.AppName != "" && !flag.IsSpecified(ctx, "copy-config") { fmt.Fprintln(io.Out, "An existing fly.toml file was found for app", existingConfig.AppName) } else { fmt.Fprintln(io.Out, "An existing fly.toml file was found") From 3a324521ffa1471e9c75b66935a96a209b2962f3 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 22 Oct 2024 11:37:40 -0400 Subject: [PATCH 083/104] use a depot builder scoped by app so they don't share cache and all that between different users/apps deployments --- deploy.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy.rb b/deploy.rb index 077189f1a4..9c711516dc 100755 --- a/deploy.rb +++ b/deploy.rb @@ -411,7 +411,7 @@ if DEPLOY_NOW in_step Step::DEPLOY do - exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref} #{CONFIG_COMMAND_STRING}") + exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref} --depot-scope=app #{CONFIG_COMMAND_STRING}") end end From 8efd9b63e1c2c5f7d9972b0f2640dfe46c59622d Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 23 Oct 2024 09:37:50 -0500 Subject: [PATCH 084/104] validate existence of fly.toml earlier when deploy-only --- deploy.rb | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/deploy.rb b/deploy.rb index 9c711516dc..a5e519dd0f 100755 --- a/deploy.rb +++ b/deploy.rb @@ -76,7 +76,7 @@ in_step Step::GIT_PULL do ref = get_env("GIT_REF") 
artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } - + exec_capture("git init", log: false) redacted_repo_url = GIT_REPO_URL.dup @@ -118,7 +118,9 @@ "" end -if !DEPLOY_ONLY +if DEPLOY_ONLY + event :error, {type: :validation, message: "missing fly.toml" } if !HAS_FLY_CONFIG +else MANIFEST_PATH = "/tmp/manifest.json" manifest = in_step Step::PLAN do @@ -199,7 +201,7 @@ plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) if plugin == "elixir" # required for elixir to work - exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") + exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") end exec_capture("asdf install #{plugin} #{version}") else @@ -442,4 +444,4 @@ $stderr.flush sleep 1.0 -end \ No newline at end of file +end From 47466bb482d883a9be4ec63094a0eeeedad5308a Mon Sep 17 00:00:00 2001 From: Jon Phenow Date: Wed, 23 Oct 2024 09:44:43 -0500 Subject: [PATCH 085/104] exit after error --- deploy.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/deploy.rb b/deploy.rb index a5e519dd0f..fed77b1455 100755 --- a/deploy.rb +++ b/deploy.rb @@ -120,6 +120,7 @@ if DEPLOY_ONLY event :error, {type: :validation, message: "missing fly.toml" } if !HAS_FLY_CONFIG + exit 1 else MANIFEST_PATH = "/tmp/manifest.json" From 9ada865027c27b7fabcee5f6ada11d37453ce147 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 29 Oct 2024 13:59:20 -0400 Subject: [PATCH 086/104] Deployer: actually support Bun (#4042) catch SIGINT and print a thing merged master fix bun by pre-installing it and adding a test for it: --- .github/workflows/auto-release.yml | 2 + .github/workflows/checks.yml | 8 +- .github/workflows/ci-dev.yml | 5 +- .github/workflows/preflight.yml | 20 +- .github/workflows/preflight_cleanup.yml | 2 +- .github/workflows/release.yml | 12 ++ .goreleaser.2.yml | 10 + deploy.rb | 16 +- deployer.Dockerfile | 3 +- doc/main.go | 4 + flypg/cmd.go | 5 +- go.mod | 72 +++---- go.sum | 158 ++++++++-------- internal/appconfig/definition_test.go | 6 +- internal/appconfig/from_machine_set.go | 4 +- internal/command/console/console.go | 15 +- internal/command/deploy/machines.go | 2 +- .../deploy/machines_deploymachinesapp.go | 58 +++--- internal/command/doctor/diag/diag.go | 6 +- internal/command/launch/cmd.go | 15 ++ internal/command/launch/deploy.go | 2 +- internal/command/launch/plan_builder.go | 24 +-- internal/command/launch/sourceinfo.go | 2 +- internal/command/machine/egress_ip.go | 19 +- internal/command/postgres/create.go | 2 +- internal/command/postgres/failover.go | 3 +- internal/command/postgres/restart.go | 2 +- internal/command/root/root.go | 14 +- internal/command/ssh/sftp.go | 2 +- internal/command/ssh/ssh_terminal.go | 2 +- internal/command/tokens/list.go | 16 +- internal/command/tokens/revoke.go | 16 +- internal/machine/config.go | 2 +- internal/metrics/db.go | 3 +- internal/statuslogger/interactivelogger.go | 2 +- scanner/jsFramework.go | 8 +- test/deployer/deployer_test.go | 25 +++ test/fixtures/bun-basic/.gitignore | 175 ++++++++++++++++++ test/fixtures/bun-basic/README.md | 15 ++ test/fixtures/bun-basic/bun.lockb | Bin 0 -> 3134 bytes test/fixtures/bun-basic/index.ts | 5 + test/fixtures/bun-basic/package.json | 11 ++ test/fixtures/bun-basic/tsconfig.json | 27 +++ 43 files changed, 580 insertions(+), 220 deletions(-) create mode 100644 test/fixtures/bun-basic/.gitignore create mode 100644 test/fixtures/bun-basic/README.md create mode 100755 test/fixtures/bun-basic/bun.lockb create mode 100644 test/fixtures/bun-basic/index.ts create 
mode 100644 test/fixtures/bun-basic/package.json create mode 100644 test/fixtures/bun-basic/tsconfig.json diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index e706df67b9..536f112dad 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -10,6 +10,7 @@ permissions: jobs: release: + if: github.ref == 'refs/heads/master' runs-on: ubuntu-latest steps: - name: Checkout master branch @@ -26,6 +27,7 @@ jobs: DEFAULT_BUMP: "patch" sync_docs: + if: github.ref == 'refs/heads/master' needs: release runs-on: ubuntu-latest steps: diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 004e4ff83a..4754266fab 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -19,10 +19,16 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + # Install Go since pre-commit below runs "go mod tidy". - uses: actions/setup-go@v5 with: go-version-file: "go.mod" check-latest: true + # pre-commit runs "pip install" which doesn't work under Debian's apt-instaled Python. + # https://packaging.python.org/en/latest/specifications/externally-managed-environments/#externally-managed-environments + - uses: actions/setup-python@v5 + with: + python-version: "3.13" - uses: pre-commit/action@v3.0.1 with: extra_args: --all-files --hook-stage=manual @@ -40,5 +46,5 @@ jobs: # check-latest: true - uses: golangci/golangci-lint-action@v6 with: - version: v1.54 + version: v1.61.0 working-directory: . diff --git a/.github/workflows/ci-dev.yml b/.github/workflows/ci-dev.yml index 552099f43f..7609ead32e 100644 --- a/.github/workflows/ci-dev.yml +++ b/.github/workflows/ci-dev.yml @@ -16,8 +16,7 @@ concurrency: jobs: test: - uses: - ./.github/workflows/test.yml + uses: ./.github/workflows/test.yml # create a dev tag for every branch except master tag_version: @@ -68,7 +67,7 @@ jobs: sha: context.sha }); - github-token: ${{ secrets.RELEASE_BOT_GITHUB_TOKEN }} + github-token: ${{ secrets.FLYIO_BUILDBOT_GITHUB_TOKEN }} # # we can remove this workflow_call once the release.yml workflow file is # # merged into the default branch diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index f8db5fe4aa..1bcac37e3f 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -3,10 +3,10 @@ name: Preflight Tests on: workflow_dispatch: inputs: - reason: - description: Brief reason for running this workflow manually + region: + description: Region required: false - default: User initiated run + default: ord type: string workflow_call: @@ -17,7 +17,6 @@ jobs: strategy: fail-fast: false matrix: - vm_size: [""] parallelism: [20] index: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19] steps: @@ -33,26 +32,27 @@ jobs: run: | curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" + # If this workflow is triggered by code changes (eg PRs), download the binary to save time. - uses: actions/download-artifact@v4 with: name: flyctl path: master-build - - name: Move flyctl binary to correct directory - run: | - mv master-build/flyctl bin/flyctl - chmod +x bin/flyctl + continue-on-error: true + # But if this is a manual run, build the binary first. 
+ - run: make - name: Run preflight tests id: preflight env: FLY_PREFLIGHT_TEST_ACCESS_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight - # This VM size is only available in ORD. - FLY_PREFLIGHT_TEST_FLY_REGIONS: ord + FLY_PREFLIGHT_TEST_FLY_REGIONS: ${{ inputs.region }} FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" FLY_FORCE_TRACE: "true" FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} FLY_PREFLIGHT_TEST_APP_PREFIX: "preflight" run: | + (test -e master-build/flyctl) && mv master-build/flyctl bin/flyctl + chmod +x bin/flyctl export PATH=$PWD/bin:$PATH echo -n failed= >> $GITHUB_OUTPUT ./scripts/preflight.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT diff --git a/.github/workflows/preflight_cleanup.yml b/.github/workflows/preflight_cleanup.yml index bf9a545bfa..146b7637ab 100644 --- a/.github/workflows/preflight_cleanup.yml +++ b/.github/workflows/preflight_cleanup.yml @@ -2,7 +2,7 @@ name: Preflight Tests Cleanup on: schedule: - - cron: '*/30 * * * *' + - cron: "*/30 * * * *" workflow_dispatch: inputs: reason: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 084d61af01..580348cc03 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,6 +17,7 @@ on: permissions: contents: write + packages: write # concurrency: # group: ${{ github.workflow }}-${{ github.ref }} @@ -81,6 +82,17 @@ jobs: with: path: dist/${{ matrix.GOOS }} key: ${{ matrix.GOOS }}-${{ needs.meta.outputs.sha_short }} + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.FLYIOBUILDS_DOCKERHUB_USERNAME }} + password: ${{ secrets.FLYIOBUILDS_DOCKERHUB_TOKEN }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Run GoReleaser if: steps.cache.outputs.cache-hit != 'true' # do not run if cache hit uses: goreleaser/goreleaser-action@v5 diff --git a/.goreleaser.2.yml b/.goreleaser.2.yml index bd55e9cd5e..0d561341c5 100644 --- a/.goreleaser.2.yml +++ b/.goreleaser.2.yml @@ -94,5 +94,15 @@ archives: wrap_in_directory: false format: zip +dockers: + - goos: linux + goarch: amd64 + image_templates: + - "flyio/flyctl:latest" + - "flyio/flyctl:v{{ .Version }}" + - "ghcr.io/superfly/flyctl:latest" + - "ghcr.io/superfly/flyctl:v{{ .Version }}" + skip_push: auto + release: disable: false diff --git a/deploy.rb b/deploy.rb index fed77b1455..70db2cbcbd 100755 --- a/deploy.rb +++ b/deploy.rb @@ -7,6 +7,11 @@ begin +Signal.trap('INT') do + event :cancel, { signal: "SIGINT" } + exit 0 +end + event :start, { ts: ts() } # Change to a directory where we'll pull on git @@ -76,7 +81,7 @@ in_step Step::GIT_PULL do ref = get_env("GIT_REF") artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } - + exec_capture("git init", log: false) redacted_repo_url = GIT_REPO_URL.dup @@ -118,10 +123,7 @@ "" end -if DEPLOY_ONLY - event :error, {type: :validation, message: "missing fly.toml" } if !HAS_FLY_CONFIG - exit 1 -else +if !DEPLOY_ONLY MANIFEST_PATH = "/tmp/manifest.json" manifest = in_step Step::PLAN do @@ -202,7 +204,7 @@ plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) if plugin == "elixir" # required for elixir to work - exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") + exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") end exec_capture("asdf install 
#{plugin} #{version}") else @@ -445,4 +447,4 @@ $stderr.flush sleep 1.0 -end +end \ No newline at end of file diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 14a91ea1c5..941598e470 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -60,7 +60,8 @@ RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 & asdf install erlang $DEFAULT_ERLANG_VERSION && asdf global erlang $DEFAULT_ERLANG_VERSION && \ asdf install elixir $DEFAULT_ELIXIR_VERSION && asdf global elixir $DEFAULT_ELIXIR_VERSION && \ # bun - asdf plugin add bun https://github.com/cometkim/asdf-bun.git + asdf plugin add bun https://github.com/cometkim/asdf-bun.git && \ + asdf install bun $DEFAULT_BUN_VERSION && asdf global bun $DEFAULT_BUN_VERSION ENV MIX_ENV=dev diff --git a/doc/main.go b/doc/main.go index 6458beae54..7b9ea65e92 100644 --- a/doc/main.go +++ b/doc/main.go @@ -26,6 +26,10 @@ func main() { cmd := cli.NewRootCommand() cmd.DisableAutoGenTag = true + // Override root command to always be `fly`, + // Otherwise it could be `main`, `flyctl` or whatever name is set to the executable + cmd.Use = "fly" + filePrepender := func(filename string) string { return "" } diff --git a/flypg/cmd.go b/flypg/cmd.go index c9f9444742..d235ff32fd 100644 --- a/flypg/cmd.go +++ b/flypg/cmd.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" fly "github.com/superfly/fly-go" @@ -73,7 +74,7 @@ func (pc *Command) UpdateSettings(ctx context.Context, leaderIp string, config m } if !result.Success { - return fmt.Errorf(result.Message) + return errors.New(result.Message) } return nil @@ -94,7 +95,7 @@ func (pc *Command) UnregisterMember(ctx context.Context, leaderIP string, standb } if !result.Success { - return fmt.Errorf(result.Message) + return errors.New(result.Message) } return nil diff --git a/go.mod b/go.mod index c772ecce76..585d1831e4 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/PuerkitoBio/rehttp v1.4.0 github.com/alecthomas/chroma v0.10.0 github.com/avast/retry-go/v4 v4.6.0 - github.com/aws/aws-sdk-go-v2/config v1.27.43 - github.com/aws/aws-sdk-go-v2/credentials v1.17.41 - github.com/aws/aws-sdk-go-v2/service/s3 v1.65.2 + github.com/aws/aws-sdk-go-v2/config v1.28.1 + github.com/aws/aws-sdk-go-v2/credentials v1.17.42 + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 github.com/azazeal/pause v1.3.0 github.com/blang/semver v3.5.1+incompatible github.com/briandowns/spinner v1.23.1 @@ -25,14 +25,14 @@ require ( github.com/chzyer/readline v1.5.1 github.com/cli/safeexec v1.0.1 github.com/coder/websocket v1.8.12 - github.com/containerd/continuity v0.4.3 - github.com/depot/depot-go v0.3.0 + github.com/containerd/continuity v0.4.4 + github.com/depot/depot-go v0.5.0 github.com/docker/docker v27.3.1+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/ejcx/sshcert v1.1.0 - github.com/getsentry/sentry-go v0.29.0 + github.com/getsentry/sentry-go v0.29.1 github.com/go-kit/log v0.2.1 github.com/go-logr/logr v1.4.2 github.com/gofrs/flock v0.12.1 @@ -46,7 +46,7 @@ require ( github.com/jinzhu/copier v0.4.0 github.com/jpillora/backoff v1.0.0 github.com/kr/text v0.2.0 - github.com/launchdarkly/go-sdk-common/v3 v3.1.0 + github.com/launchdarkly/go-sdk-common/v3 v3.2.0 github.com/logrusorgru/aurora v2.0.3+incompatible github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 @@ -64,9 +64,9 @@ require ( github.com/opencontainers/image-spec v1.1.0 
github.com/pelletier/go-toml/v2 v2.2.3 github.com/pkg/errors v0.9.1 - github.com/pkg/sftp v1.13.6 + github.com/pkg/sftp v1.13.7 github.com/prometheus/blackbox_exporter v0.25.0 - github.com/prometheus/client_golang v1.20.4 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/r3labs/diff v1.1.0 github.com/samber/lo v1.47.0 @@ -76,20 +76,20 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 - github.com/superfly/fly-go v0.1.32 + github.com/superfly/fly-go v0.1.34 github.com/superfly/graphql v0.2.4 github.com/superfly/lfsc-go v0.1.1 github.com/superfly/macaroon v0.2.14-0.20240819201738-61a02aa53648 github.com/superfly/tokenizer v0.0.3-0.20240826174224-a17a2e0a9dc0 - github.com/vektah/gqlparser/v2 v2.5.17 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 - go.opentelemetry.io/otel v1.30.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 - go.opentelemetry.io/otel/sdk v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + github.com/vektah/gqlparser/v2 v2.5.18 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 golang.org/x/crypto v0.28.0 - golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/mod v0.21.0 golang.org/x/net v0.30.0 golang.org/x/sync v0.8.0 @@ -128,27 +128,27 @@ require ( github.com/alexflint/go-scalar v1.2.0 // indirect github.com/andybalholm/brotli v1.1.0 // indirect github.com/apex/log v1.9.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect 
+ github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect github.com/aws/smithy-go v1.22.0 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231213181459-b0fcec718dc6 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168 // indirect github.com/buildpacks/lifecycle v0.19.6 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -258,18 +258,18 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/oauth2 v0.22.0 // indirect - golang.org/x/tools v0.25.0 // indirect + golang.org/x/tools v0.26.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gvisor.dev/gvisor v0.0.0-20230927004350-cbd86285d259 // indirect diff --git a/go.sum b/go.sum index 7068b6989e..1f0ed0636e 100644 --- a/go.sum +++ b/go.sum @@ -96,44 +96,44 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.27.43 h1:p33fDDihFC390dhhuv8nOmX419wjOSDQRb+USt20RrU= -github.com/aws/aws-sdk-go-v2/config v1.27.43/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41 
h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw= +github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 h1:wLPDAUFT50NEXGXpywRU3AA74pg35RJjWol/68ruvQQ= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5/go.mod h1:AOHmGMoPtSY9Zm2zBuwUJQBisIvYAZeA1n7b6f4e880= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 h1:PQp21GBlGNaQ+AVJAB8w2KTmLx0DkFS2fDET2Iy3+f0= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5/go.mod h1:WMntdAol8KgeYsa5sDZPsRTXs4jVZIMYu0eQVVIQxnc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.65.2 h1:yi8m+jepdp6foK14xXLGkYBenxnlcfJ45ka4Pg7fDSQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.65.2/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231213181459-b0fcec718dc6 h1:PlJRmqKlSlEUlwem1c3zdPaEMtJc/ktnV7naD5Qvsx4= @@ -153,8 +153,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= -github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 
h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradleyjkemp/cupaloy/v2 v2.6.0 h1:knToPYa2xtfg42U3I6punFEjaGFKWQRXJwj0JTv4mTs= github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= @@ -202,8 +202,8 @@ github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= github.com/containerd/containerd v1.7.16 h1:7Zsfe8Fkj4Wi2My6DXGQ87hiqIrmOXolm72ZEkFU5Mg= github.com/containerd/containerd v1.7.16/go.mod h1:NL49g7A/Fui7ccmxV6zkBWwqMgmMxFWzujYCc+JLt7k= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -227,8 +227,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/depot/depot-go v0.3.0 h1:OEGwIZS5nqfy4YHZVDduGxTX/IG+0VgHTx+kjxZ/SYU= -github.com/depot/depot-go v0.3.0/go.mod h1:9xKcGBd3HlDFcFkRbbdOWF/+2bBG0aFtpZAI+5rvfDc= +github.com/depot/depot-go v0.5.0 h1:OBx/B0DGviHVG+TDmhdpaQA1anTGlQviwBrhR9TKTmQ= +github.com/depot/depot-go v0.5.0/go.mod h1:9xKcGBd3HlDFcFkRbbdOWF/+2bBG0aFtpZAI+5rvfDc= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= @@ -285,8 +285,8 @@ github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1/go.mod h1:Az6Jt+M5idSED2YPGtwnfJV0kXohgdCBPmHGSYc1r04= github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU= github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= -github.com/getsentry/sentry-go v0.29.0 h1:YtWluuCFg9OfcqnaujpY918N/AhCCwarIDWOYSBAjCA= -github.com/getsentry/sentry-go v0.29.0/go.mod h1:jhPesDAL0Q0W2+2YEuVOvdWmVtdsr1+jtBrlDEVWwLY= +github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA= +github.com/getsentry/sentry-go v0.29.1/go.mod h1:x3AtIzN01d6SiWkderzaH28Tm0lgkafpJ5Bm3li39O0= github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -432,8 +432,8 @@ github.com/kylelemons/godebug 
v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/launchdarkly/go-jsonstream/v3 v3.0.0 h1:qJF/WI09EUJ7kSpmP5d1Rhc81NQdYUhP17McKfUq17E= github.com/launchdarkly/go-jsonstream/v3 v3.0.0/go.mod h1:/1Gyml6fnD309JOvunOSfyysWbZ/ZzcA120gF/cQtC4= -github.com/launchdarkly/go-sdk-common/v3 v3.1.0 h1:KNCP5rfkOt/25oxGLAVgaU1BgrZnzH9Y/3Z6I8bMwDg= -github.com/launchdarkly/go-sdk-common/v3 v3.1.0/go.mod h1:mXFmDGEh4ydK3QilRhrAyKuf9v44VZQWnINyhqbbOd0= +github.com/launchdarkly/go-sdk-common/v3 v3.2.0 h1:LzwlrXRBPC7NjdbnDxio8YGHMvDrNb4i6lbjpLgwsyk= +github.com/launchdarkly/go-sdk-common/v3 v3.2.0/go.mod h1:mXFmDGEh4ydK3QilRhrAyKuf9v44VZQWnINyhqbbOd0= github.com/launchdarkly/go-test-helpers/v3 v3.0.1 h1:Z4lUVrh7+hIvL47KVjEBE/owbqqjKUEYTp4aBX/5OZM= github.com/launchdarkly/go-test-helpers/v3 v3.0.1/go.mod h1:u2ZvJlc/DDJTFrshWW50tWMZHLVYXofuSHUfTU/eIwM= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= @@ -544,8 +544,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -554,8 +554,8 @@ github.com/prometheus/blackbox_exporter v0.25.0/go.mod h1:SpTDn8xW1XOstBQ1uVgw54 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -579,8 +579,8 @@ github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod 
h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= @@ -642,8 +642,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/superfly/fly-go v0.1.32 h1:PfO75y+X/e5j+DT+MQjaKLDa7rhqk5NmcGIz4bQLuoQ= -github.com/superfly/fly-go v0.1.32/go.mod h1:FfFgQk88G60rZWnpcCqgufk5CZoCjZznLws699gswWU= +github.com/superfly/fly-go v0.1.34 h1:h6tL+z8VqMH4yHM+YwbC8fib3PquUB3IrGu+0cxuFNw= +github.com/superfly/fly-go v0.1.34/go.mod h1:ZLXEOA1TpJz89A3tUBpzW3/foX+HGLaVtudSeUymj/w= github.com/superfly/graphql v0.2.4 h1:Av8hSk4x8WvKJ6MTnEwrLknSVSGPc7DWpgT3z/kt3PU= github.com/superfly/graphql v0.2.4/go.mod h1:CVfDl31srm8HnJ9udwLu6hFNUW/P6GUM2dKcG1YQ8jc= github.com/superfly/lfsc-go v0.1.1 h1:dGjLgt81D09cG+aR9lJZIdmonjZSR5zYCi7s54+ZU2Q= @@ -669,8 +669,8 @@ github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 h1:Y/M5lygoNPKwVN github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vektah/gqlparser/v2 v2.5.17 h1:9At7WblLV7/36nulgekUgIaqHZWn5hxqluxrxGUhOmI= -github.com/vektah/gqlparser/v2 v2.5.17/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= +github.com/vektah/gqlparser/v2 v2.5.18 h1:zSND3GtutylAQ1JpWnTHcqtaRZjl+y3NROeW8vuNo6Y= +github.com/vektah/gqlparser/v2 v2.5.18/go.mod h1:6HLzf7JKv9Fi3APymudztFQNmLXR5qJeEo6BOFcXVfc= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= @@ -691,22 +691,22 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= 
-go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -723,15 +723,15 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -760,10 +760,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -807,6 +807,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -814,10 +816,11 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term 
v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= @@ -829,6 +832,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= @@ -846,8 +850,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -863,10 +867,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -876,8 +880,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/appconfig/definition_test.go b/internal/appconfig/definition_test.go index bd63b806ae..7da99bb777 100644 --- a/internal/appconfig/definition_test.go +++ b/internal/appconfig/definition_test.go @@ -266,13 +266,15 @@ func TestToDefinition(t *testing.T) { }, "metrics": []any{ map[string]any{ - "port": int64(9999), - "path": "/metrics", + "port": int64(9999), + "path": "/metrics", + "https": false, }, map[string]any{ "port": int64(9998), "path": "/metrics", "processes": []any{"web"}, + "https": false, }, }, "statics": []any{ diff --git a/internal/appconfig/from_machine_set.go b/internal/appconfig/from_machine_set.go index a5ac1942c7..bc18f4c500 100644 --- a/internal/appconfig/from_machine_set.go +++ b/internal/appconfig/from_machine_set.go @@ -209,12 +209,12 @@ func processGroupsFromMachineSet(ctx context.Context, ms machine.MachineSet) (*p for _, cmd := range report.others { otherCmds += fmt.Sprintf(" %s\n", cmd) } - warningMsg += warning("processes", fmt.Sprintf(`Found these additional commands on some machines. Consider adding process groups to your fly.toml and run machines with those process groups. + warningMsg += warning("processes", `Found these additional commands on some machines. Consider adding process groups to your fly.toml and run machines with those process groups. 
For more info please see: https://fly.io/docs/reference/configuration/#the-processes-section Machine IDs that were not saved to fly.toml: %s Commands they are running: %s -`, strings.Join(otherMachineIds, ", "), otherCmds)) +`, strings.Join(otherMachineIds, ", "), otherCmds) warningMsg += "\n" } diff --git a/internal/command/console/console.go b/internal/command/console/console.go index a9137bc81b..3d9b3b8ad0 100644 --- a/internal/command/console/console.go +++ b/internal/command/console/console.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "maps" - "slices" "github.com/google/shlex" "github.com/samber/lo" @@ -401,18 +400,8 @@ func makeEphemeralConsoleMachine(ctx context.Context, app *fly.AppCompact, appCo func determineEphemeralConsoleMachineGuest(ctx context.Context, appConfig *appconfig.Config) (*fly.MachineGuest, error) { var guest *fly.MachineGuest - haveConsoleVMSection := 0 <= slices.IndexFunc( - appConfig.Compute, - func(c *appconfig.Compute) bool { - return slices.Index(c.Processes, "console") >= 0 - }, - ) - if haveConsoleVMSection { - groupConfig, err := appConfig.ToMachineConfig("console", nil) - if err != nil { - return nil, fmt.Errorf("failed to check Machine configuration for the 'console' group: %w", err) - } - guest = groupConfig.Guest + if compute := appConfig.ComputeForGroup("console"); compute != nil { + guest = compute.MachineGuest } guest, err := flag.GetMachineGuest(ctx, guest) diff --git a/internal/command/deploy/machines.go b/internal/command/deploy/machines.go index 650b4c8e24..8dca79ee72 100644 --- a/internal/command/deploy/machines.go +++ b/internal/command/deploy/machines.go @@ -174,7 +174,7 @@ func NewMachineDeployment(ctx context.Context, args MachineDeploymentArgs) (_ Ma // TODO: Blend extraInfo into ValidationError and remove this hack if err, extraInfo := appConfig.ValidateGroups(ctx, lo.Keys(args.ProcessGroups)); err != nil { - fmt.Fprintf(io.ErrOut, extraInfo) + fmt.Fprint(io.ErrOut, extraInfo) tracing.RecordError(span, err, "failed to validate process groups") return nil, err } diff --git a/internal/command/deploy/machines_deploymachinesapp.go b/internal/command/deploy/machines_deploymachinesapp.go index ccc1ef3618..51261f77b5 100644 --- a/internal/command/deploy/machines_deploymachinesapp.go +++ b/internal/command/deploy/machines_deploymachinesapp.go @@ -1263,7 +1263,7 @@ type smokeChecksError struct { } func (s smokeChecksError) Error() string { - return s.err.Error() + return fmt.Sprintf("smoke checks for %s failed: %s", s.machineID, s.err) } func (s smokeChecksError) Unwrap() error { @@ -1279,53 +1279,51 @@ func (s smokeChecksError) Suggestion() string { return suggestion } -func (md *machineDeployment) doSmokeChecks(ctx context.Context, lm machine.LeasableMachine, showLogs bool) (err error) { +func (md *machineDeployment) doSmokeChecks(ctx context.Context, lm machine.LeasableMachine, showLogs bool) error { ctx, span := tracing.GetTracer().Start(ctx, "smoke_checks", trace.WithAttributes(attribute.String("machine.ID", lm.Machine().ID))) defer span.End() + if md.skipSmokeChecks { span.AddEvent("skipped") return nil } - err = lm.WaitForSmokeChecksToPass(ctx) + err := lm.WaitForSmokeChecksToPass(ctx) if err == nil { return nil } + smokeErr := &smokeChecksError{ + machineID: lm.Machine().ID, + err: err, + } + if showLogs { resumeLogFn := statuslogger.Pause(ctx) defer resumeLogFn() - } - logs, _, logErr := md.apiClient.GetAppLogs(ctx, md.app.Name, "", md.appConfig.PrimaryRegion, lm.Machine().ID) - if fly.IsNotAuthenticatedError(logErr) && showLogs { - 
span.AddEvent("not authorized to retrieve logs") - fmt.Fprintf(md.io.ErrOut, "Warn: not authorized to retrieve app logs (this can happen when using deploy tokens), so we can't show you what failed. Use `fly logs -i %s` or open the monitoring dashboard to see them: https://fly.io/apps/%s/monitoring?region=&instance=%s\n", lm.Machine().ID, md.appConfig.AppName, lm.Machine().ID) - } else { - if logErr != nil { - err := fmt.Errorf("error getting logs for machine %s: %w", lm.Machine().ID, logErr) - span.RecordError(err) - return err - } - var log string - for _, l := range logs { - // Ideally we should use InstanceID here, but it's not available in the logs. - if l.Timestamp >= lm.Machine().UpdatedAt { - log += fmt.Sprintf("%s\n", l.Message) + logs, _, logErr := md.apiClient.GetAppLogs(ctx, md.app.Name, "", md.appConfig.PrimaryRegion, lm.Machine().ID) + switch { + case logErr == nil: + for _, l := range logs { + // Ideally we should use InstanceID here, but it's not available in the logs. + if l.Timestamp >= lm.Machine().UpdatedAt { + smokeErr.logs += fmt.Sprintf("%s\n", l.Message) + } } + case fly.IsNotAuthenticatedError(logErr): + span.AddEvent("not authorized to retrieve logs") + fmt.Fprintf(md.io.ErrOut, "Warn: not authorized to retrieve app logs (this can happen when using deploy tokens), so we can't show you what failed. Use `fly logs -i %s` or open the monitoring dashboard to see them: https://fly.io/apps/%s/monitoring?region=&instance=%s\n", lm.Machine().ID, md.appConfig.AppName, lm.Machine().ID) + smokeErr.logs = "" + default: + span.AddEvent("error retrieving machine logs") + fmt.Fprintf(md.io.ErrOut, "Warn: got an error retrieving the logs so we can't show you what failed. Use `fly logs -i %s` or open the monitoring dashboard to see them: https://fly.io/apps/%s/monitoring?region=&instance=%s\n", lm.Machine().ID, md.appConfig.AppName, lm.Machine().ID) + smokeErr.logs = fmt.Sprintf("", smokeErr.machineID) } - - err := &smokeChecksError{ - err: err, - machineID: lm.Machine().ID, - logs: log, - } - span.RecordError(err) - return err } - err = fmt.Errorf("smoke checks for %s failed: %v", lm.Machine().ID, err) - span.RecordError(err) - return err + + span.RecordError(smokeErr) + return smokeErr } func (md *machineDeployment) checkDNS(ctx context.Context) error { diff --git a/internal/command/doctor/diag/diag.go b/internal/command/doctor/diag/diag.go index f53712078a..be8887565a 100644 --- a/internal/command/doctor/diag/diag.go +++ b/internal/command/doctor/diag/diag.go @@ -96,12 +96,12 @@ add the --force flag to send us best-effort diagnostics.`) if err = ft.fn(ctx, zip); err != nil { if ft.expect { - fmt.Printf(color.Red(fmt.Sprintf("FAILED: %s\n", err))) + fmt.Print(color.Red(fmt.Sprintf("FAILED: %s\n", err))) } else { - fmt.Printf("skipping\n") + fmt.Print("skipping\n") } } else { - fmt.Printf(color.Green("ok\n")) + fmt.Print(color.Green("ok\n")) } } diff --git a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index 5ca940842b..763de8531e 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -113,6 +113,21 @@ func New() (cmd *cobra.Command) { Default: false, Hidden: true, }, + flag.Bool{ + Name: "no-db", + Description: "Skip automatically provisioning a database", + Default: false, + }, + flag.Bool{ + Name: "no-redis", + Description: "Skip automatically provisioning a Redis instance", + Default: false, + }, + flag.Bool{ + Name: "no-object-storage", + Description: "Skip automatically provisioning an object storage bucket", + Default: false, + 
}, flag.Bool{ Name: "json", Description: "Generate configuration in JSON format", diff --git a/internal/command/launch/deploy.go b/internal/command/launch/deploy.go index a232913819..f7c2e20dec 100644 --- a/internal/command/launch/deploy.go +++ b/internal/command/launch/deploy.go @@ -74,7 +74,7 @@ func (state *launchState) firstDeploy(ctx context.Context) error { err, extraInfo := state.appConfig.Validate(ctx) if extraInfo != "" { - fmt.Fprintf(io.ErrOut, extraInfo) + fmt.Fprint(io.ErrOut, extraInfo) } if err != nil { return fmt.Errorf("invalid configuration file: %w", err) diff --git a/internal/command/launch/plan_builder.go b/internal/command/launch/plan_builder.go index 11b1262b06..5a27e8e04f 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -218,20 +218,22 @@ func buildManifest(ctx context.Context, parentConfig *appconfig.Config, recovera if srcInfo != nil { lp.ScannerFamily = srcInfo.Family const scannerSource = "determined from app source" - switch srcInfo.DatabaseDesired { - case scanner.DatabaseKindPostgres: - lp.Postgres = plan.DefaultPostgres(lp) - planSource.postgresSource = scannerSource - case scanner.DatabaseKindMySQL: - // TODO - case scanner.DatabaseKindSqlite: - // TODO - } - if srcInfo.RedisDesired { + if !flag.GetBool(ctx, "no-db") { + switch srcInfo.DatabaseDesired { + case scanner.DatabaseKindPostgres: + lp.Postgres = plan.DefaultPostgres(lp) + planSource.postgresSource = scannerSource + case scanner.DatabaseKindMySQL: + // TODO + case scanner.DatabaseKindSqlite: + // TODO + } + } + if !flag.GetBool(ctx, "no-redis") && srcInfo.RedisDesired { lp.Redis = plan.DefaultRedis(lp) planSource.redisSource = scannerSource } - if srcInfo.ObjectStorageDesired { + if !flag.GetBool(ctx, "no-object-storage") && srcInfo.ObjectStorageDesired { lp.ObjectStorage = plan.DefaultObjectStorage(lp) planSource.tigrisSource = scannerSource } diff --git a/internal/command/launch/sourceinfo.go b/internal/command/launch/sourceinfo.go index 6a0b66d7c1..3843df2da0 100644 --- a/internal/command/launch/sourceinfo.go +++ b/internal/command/launch/sourceinfo.go @@ -115,7 +115,7 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC if srcInfo.Builder != "" { fmt.Fprintln(io.Out, "Using the following build configuration:") fmt.Fprintln(io.Out, "\tBuilder:", srcInfo.Builder) - if srcInfo.Buildpacks != nil && len(srcInfo.Buildpacks) > 0 { + if len(srcInfo.Buildpacks) > 0 { fmt.Fprintln(io.Out, "\tBuildpacks:", strings.Join(srcInfo.Buildpacks, " ")) } diff --git a/internal/command/machine/egress_ip.go b/internal/command/machine/egress_ip.go index 1713e47a20..94697c2514 100644 --- a/internal/command/machine/egress_ip.go +++ b/internal/command/machine/egress_ip.go @@ -49,6 +49,7 @@ func newAllocateEgressIp() *cobra.Command { flag.Add(cmd, flag.App(), flag.AppConfig(), + flag.Yes(), ) cmd.Args = cobra.ExactArgs(1) @@ -93,6 +94,7 @@ func newReleaseEgressIP() *cobra.Command { flag.Add(cmd, flag.App(), flag.AppConfig(), + flag.Yes(), ) cmd.Args = cobra.ExactArgs(1) @@ -108,7 +110,7 @@ func runAllocateEgressIP(ctx context.Context) (err error) { machineId = args[0] ) - if !flag.GetBool(ctx, "yes") { + if !flag.GetYes(ctx) { msg := `Looks like you're allocating a static egress (outgoing) IP. This is an advanced feature, and is not needed by most apps. 
Are you sure this is what you want?` @@ -167,6 +169,21 @@ func runReleaseEgressIP(ctx context.Context) (err error) { machineId = args[0] ) + if !flag.GetYes(ctx) { + msg := `Are you sure?` + + switch confirmed, err := prompt.Confirm(ctx, msg); { + case err == nil: + if !confirmed { + return nil + } + case prompt.IsNonInteractive(err): + return prompt.NonInteractiveError("yes flag must be specified when not running interactively") + default: + return err + } + } + v4, v6, err := client.ReleaseEgressIPAddress(ctx, appName, machineId) if err != nil { return err diff --git a/internal/command/postgres/create.go b/internal/command/postgres/create.go index b8f530a4e0..9b701b3c65 100644 --- a/internal/command/postgres/create.go +++ b/internal/command/postgres/create.go @@ -161,7 +161,7 @@ func run(ctx context.Context) (err error) { // Initial cluster size may not be greater than 1 with fork-from if pgConfig.InitialClusterSize > 1 { - fmt.Fprintf(io.Out, colorize.Yellow("Warning: --initial-cluster-size is ignored when specifying --fork-from\n")) + fmt.Fprint(io.Out, colorize.Yellow("Warning: --initial-cluster-size is ignored when specifying --fork-from\n")) pgConfig.InitialClusterSize = 1 } diff --git a/internal/command/postgres/failover.go b/internal/command/postgres/failover.go index b6a0b37a72..6555372d53 100644 --- a/internal/command/postgres/failover.go +++ b/internal/command/postgres/failover.go @@ -2,6 +2,7 @@ package postgres import ( "context" + "errors" "fmt" "strings" "time" @@ -399,7 +400,7 @@ func pickNewLeader(ctx context.Context, app *fly.AppCompact, primaryCandidates [ err += "\nplease fix one or more of the above issues, and try again\n" - return nil, fmt.Errorf(err) + return nil, errors.New(err) } // Before doing anything that might mess up, it's useful to check if a dry run of the failover command will work, since that allows repmgr to do some checks diff --git a/internal/command/postgres/restart.go b/internal/command/postgres/restart.go index fb07cfa12a..5ce65f8ed1 100644 --- a/internal/command/postgres/restart.go +++ b/internal/command/postgres/restart.go @@ -136,7 +136,7 @@ func machinesRestart(ctx context.Context, appName string, input *fly.RestartMach if err := pgclient.Failover(ctx); err != nil { msg := fmt.Sprintf("failed to perform failover: %s", err.Error()) if !force { - return fmt.Errorf(msg) + return fmt.Errorf("failed to perform failover: %w", err) } fmt.Fprintln(io.Out, colorize.Red(msg)) diff --git a/internal/command/root/root.go b/internal/command/root/root.go index b840110c03..dd0880c128 100644 --- a/internal/command/root/root.go +++ b/internal/command/root/root.go @@ -3,6 +3,9 @@ package root import ( "context" + "log" + "os" + "path/filepath" "github.com/kr/text" "github.com/olekukonko/tablewriter" @@ -77,7 +80,16 @@ func New() *cobra.Command { short = "The Fly.io command line interface" ) - root := command.New("fly", short, long, run) + exePath, err := os.Executable() + var exe string + if err != nil { + log.Printf("WARN: failed to find executable, error=%q", err) + exe = "fly" + } else { + exe = filepath.Base(exePath) + } + + root := command.New(exe, short, long, run) root.PersistentPreRun = func(cmd *cobra.Command, args []string) { cmd.SilenceUsage = true cmd.SilenceErrors = true diff --git a/internal/command/ssh/sftp.go b/internal/command/ssh/sftp.go index d5e61fe874..04ec9fa1ec 100644 --- a/internal/command/ssh/sftp.go +++ b/internal/command/ssh/sftp.go @@ -144,7 +144,7 @@ func runLs(ctx context.Context) error { return err } - fmt.Printf(walker.Path() + 
"\n") + fmt.Println(walker.Path()) } return nil diff --git a/internal/command/ssh/ssh_terminal.go b/internal/command/ssh/ssh_terminal.go index f906f6700f..551cdc95da 100644 --- a/internal/command/ssh/ssh_terminal.go +++ b/internal/command/ssh/ssh_terminal.go @@ -56,7 +56,7 @@ func RunSSHCommand(ctx context.Context, app *fly.AppCompact, dialer agent.Dialer } if len(errBuf.Bytes()) > 0 { - return nil, fmt.Errorf(errBuf.String()) + return nil, errors.New(errBuf.String()) } return outBuf.Bytes(), nil diff --git a/internal/command/tokens/list.go b/internal/command/tokens/list.go index 53aa8b9bfe..aa007fbbf8 100644 --- a/internal/command/tokens/list.go +++ b/internal/command/tokens/list.go @@ -3,6 +3,7 @@ package tokens import ( "context" "fmt" + "time" "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/appconfig" @@ -38,6 +39,8 @@ func newList() *cobra.Command { flag.Org(), ) + cmd.Aliases = []string{"ls"} + return cmd } @@ -89,7 +92,7 @@ func runList(ctx context.Context) (err error) { fmt.Fprintln(out, "Tokens for app \""+appName+"\":") for _, token := range tokens { - rows = append(rows, []string{token.Id, token.Name, token.User.Email, token.ExpiresAt.String()}) + rows = append(rows, []string{token.Id, token.Name, token.User.Email, token.ExpiresAt.String(), revokedAtToString(token.RevokedAt)}) } case "org": @@ -100,14 +103,21 @@ func runList(ctx context.Context) (err error) { fmt.Fprintln(out, "Tokens for organization \""+org.Slug+"\":") for _, token := range org.LimitedAccessTokens.Nodes { - rows = append(rows, []string{token.Id, token.Name, token.User.Email, token.ExpiresAt.String()}) + rows = append(rows, []string{token.Id, token.Name, token.User.Email, token.ExpiresAt.String(), revokedAtToString(token.RevokedAt)}) } } - _ = render.Table(out, "", rows, "ID", "Name", "Created By", "Expires At") + _ = render.Table(out, "", rows, "ID", "Name", "Created By", "Expires At", "Revoked At") return nil } +func revokedAtToString(time *time.Time) string { + if time != nil { + return time.String() + } + return "" +} + func determineScope(scopeStr string, appFlagStr string, orgFlagStr string, configFlagStr string) (scope string, err error) { // app scope is given highest priority, as it is more granular than org, identified by --app|--config|--scope=app // org scope is only used when specified by --org|--scope=org withought any app scope indicator diff --git a/internal/command/tokens/revoke.go b/internal/command/tokens/revoke.go index 32bfc6595e..f40319b64f 100644 --- a/internal/command/tokens/revoke.go +++ b/internal/command/tokens/revoke.go @@ -3,6 +3,7 @@ package tokens import ( "context" "fmt" + "os" "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/command" @@ -13,8 +14,8 @@ import ( func newRevoke() *cobra.Command { const ( short = "Revoke tokens" - long = "used like: 'fly tokens revoke [ids]'" - usage = "revoke" + long = "Revoke one or more tokens." + usage = "revoke [flags] ID ID ..." 
) cmd := command.New(usage, short, long, runRevoke, @@ -27,15 +28,24 @@ func newRevoke() *cobra.Command { func runRevoke(ctx context.Context) (err error) { apiClient := flyutil.ClientFromContext(ctx) + numRevoked := 0 + args := flag.Args(ctx) + if len(args) == 0 { + return fmt.Errorf("no token IDs; please provide IDs as positional arguments") + } + for _, id := range args { err := apiClient.RevokeLimitedAccessToken(ctx, id) if err != nil { - fmt.Printf("Failed to revoke token %s: %s\n", id, err) + fmt.Fprintf(os.Stderr, "failed to revoke token %s: %s\n", id, err) continue } fmt.Printf("Revoked %s\n", id) + numRevoked += 1 } + fmt.Printf("%d tokens revoked\n", numRevoked) + return nil } diff --git a/internal/machine/config.go b/internal/machine/config.go index 27928c22b5..d1b7c89006 100644 --- a/internal/machine/config.go +++ b/internal/machine/config.go @@ -32,7 +32,7 @@ func ConfirmConfigChanges(ctx context.Context, machine *fly.Machine, targetConfi } if customPrompt != "" { - fmt.Fprintf(io.Out, customPrompt) + fmt.Fprint(io.Out, customPrompt) } else { fmt.Fprintf(io.Out, "Configuration changes to be applied to machine: %s (%s)\n", colorize.Bold(machine.ID), colorize.Bold(machine.Name)) } diff --git a/internal/metrics/db.go b/internal/metrics/db.go index 1db598c623..ed672fe366 100644 --- a/internal/metrics/db.go +++ b/internal/metrics/db.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "io" "net/http" "os" "os/exec" @@ -54,7 +53,7 @@ func FlushMetrics(ctx context.Context) error { } go func() { - io.WriteString(stdin, string(json)) + stdin.Write(json) stdin.Close() }() diff --git a/internal/statuslogger/interactivelogger.go b/internal/statuslogger/interactivelogger.go index 395c6ecbf8..cc14d40cf8 100644 --- a/internal/statuslogger/interactivelogger.go +++ b/internal/statuslogger/interactivelogger.go @@ -50,7 +50,7 @@ func (il *interactiveLogger) Destroy(clear bool) { il.done = false if clear { - fmt.Fprintf(il.io.Out, il.clearStr()) + fmt.Print(il.io.Out, il.clearStr()) } else { fmt.Fprintf(il.io.Out, "%s%s\n", aec.Down(uint(il.height(il.prevLines))), divider) } diff --git a/scanner/jsFramework.go b/scanner/jsFramework.go index 75468746a3..c414711f86 100644 --- a/scanner/jsFramework.go +++ b/scanner/jsFramework.go @@ -298,7 +298,13 @@ func JsFrameworkCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchP _, err = os.Stat("pnpm-lock.yaml") if !errors.Is(err, fs.ErrNotExist) { - args = []string{"pnpm", "add", "-D", "@flydotio/dockerfile@latest"} + + _, err = os.Stat("pnpm-workspace.yaml") + if errors.Is(err, fs.ErrNotExist) { + args = []string{"pnpm", "add", "-D", "@flydotio/dockerfile@latest"} + } else { + args = []string{"pnpm", "add", "-w", "-D", "@flydotio/dockerfile@latest"} + } } _, err = os.Stat("bun.lockb") diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index bab9f81461..39b1f0728a 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -155,6 +155,31 @@ func TestLaunchBasicNode(t *testing.T) { require.Equal(t, string(body), "Hello, World!") } +func TestLaunchBasicBun(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("bun-basic"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.OptOutGithubActions, + testlib.DeployNow, + withWorkDirAppSource, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, manifest.Plan.Runtime.Language, "bun") + + appName := 
deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Equal(t, string(body), "Hello, Bun!") +} + func TestLaunchGoFromRepo(t *testing.T) { deploy := testDeployer(t, createRandomApp, diff --git a/test/fixtures/bun-basic/.gitignore b/test/fixtures/bun-basic/.gitignore new file mode 100644 index 0000000000..9b1ee42e84 --- /dev/null +++ b/test/fixtures/bun-basic/.gitignore @@ -0,0 +1,175 @@ +# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore + +# Logs + +logs +_.log +npm-debug.log_ +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Caches + +.cache + +# Diagnostic reports (https://nodejs.org/api/report.html) + +report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json + +# Runtime data + +pids +_.pid +_.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover + +lib-cov + +# Coverage directory used by tools like istanbul + +coverage +*.lcov + +# nyc test coverage + +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) + +.grunt + +# Bower dependency directory (https://bower.io/) + +bower_components + +# node-waf configuration + +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) + +build/Release + +# Dependency directories + +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) + +web_modules/ + +# TypeScript cache + +*.tsbuildinfo + +# Optional npm cache directory + +.npm + +# Optional eslint cache + +.eslintcache + +# Optional stylelint cache + +.stylelintcache + +# Microbundle cache + +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history + +.node_repl_history + +# Output of 'npm pack' + +*.tgz + +# Yarn Integrity file + +.yarn-integrity + +# dotenv environment variable files + +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) + +.parcel-cache + +# Next.js build output + +.next +out + +# Nuxt.js build / generate output + +.nuxt +dist + +# Gatsby files + +# Comment in the public line in if your project uses Gatsby and not Next.js + +# https://nextjs.org/blog/next-9-1#public-directory-support + +# public + +# vuepress build output + +.vuepress/dist + +# vuepress v2.x temp and cache directory + +.temp + +# Docusaurus cache and generated files + +.docusaurus + +# Serverless directories + +.serverless/ + +# FuseBox cache + +.fusebox/ + +# DynamoDB Local files + +.dynamodb/ + +# TernJS port file + +.tern-port + +# Stores VSCode versions used for testing VSCode extensions + +.vscode-test + +# yarn v2 + +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* + +# IntelliJ based IDEs +.idea + +# Finder (MacOS) folder config +.DS_Store diff --git a/test/fixtures/bun-basic/README.md b/test/fixtures/bun-basic/README.md new file mode 100644 index 0000000000..534039d7b7 --- /dev/null +++ b/test/fixtures/bun-basic/README.md @@ -0,0 +1,15 @@ +# bun-basic + +To install dependencies: + +```bash +bun install +``` + +To run: + +```bash +bun run index.ts +``` + +This project was created using `bun init` in bun v1.1.3. [Bun](https://bun.sh) is a fast all-in-one JavaScript runtime. 
diff --git a/test/fixtures/bun-basic/bun.lockb b/test/fixtures/bun-basic/bun.lockb new file mode 100755 index 0000000000000000000000000000000000000000..904037df38dd1d98eac62894e87353506d0267fe GIT binary patch literal 3134 zcmY#Z)GsYA(of3F(@)JSQ%EY!;{sycoc!eMw9K4T-L(9o+{6;yG6OCq1_lNfzZLs8 ztLGh568qBXAjR8k+cfWuE2GTLuq5+7vqoN#b=*KjK)?>67&y@A1}HxprU1<6WMF6z z2C}$-bTE(x@fQGTpkWP9fiw@0o(>fU%7N9jb}oo7JsE9qicNE`w?X5V>Nn|UJA_^< zP1KL-RoiN7$(e6+>{Z~b-HT7psGOg5DRaJRgN}ABY})UpHL1m)dcjN#|&isdiR#o>9F2PVw596Rc_MMb2zf;`d*WOWyEg zVXHHexybRvvhn2M=T|Zl>`RYTXge|;o8Z4IXyX-o^RCp4#2%4ZcS@EVGP_V*Z@0Q6 zLov|cDC5M##diHOGEJE6rm>b6MrqGNGM5GBSRl=!7SLn;d|`g|T07>Al?!=lUm{xIHQAd)QsZ_Sl571H59QYbOil ze%*I1F5+ORvsQ<*$`!+jNan)Q8OYfzZRSGPGU9h@?1;?gh`Mq9?XJUXa_8(Y%bToq zW3Ae{g@wJ%vO@38H4t#wL>rmKmRbOPWd?VXA*!!n^*XGc7GR7s)icmDU|@i? z3}7`q2UAXBNosKk0|TtB0IU0%7!CCd^^A=e7+|dhSnbcmSZASUs%L1xumI>DP`d)w zP5{an8R!`r=~+Al%79uLu$BW*##GP9OwR<19xb3+5a@vV@C4ka$WdCHnp>7yq~}^u zl3J9Pm=j!5l$n=qr(lQ(1Pgc|loTg}O+4^hWNFcq2_m2d8Ay!(-eQ|~vW2@!ii=Z= zfTqP>R%i@L^ZtU?cEohpU0=2CD~~VGK3{YJ7Pyl#!R8l8VPTBvn8IL5g)Dv1w=ma#3ksN@j8< zK}R7g0~&+O)deR3Q;=bVol#y4)LmYz3r=c~AOXiBBA7nu7 literal 0 HcmV?d00001 diff --git a/test/fixtures/bun-basic/index.ts b/test/fixtures/bun-basic/index.ts new file mode 100644 index 0000000000..1daad102b5 --- /dev/null +++ b/test/fixtures/bun-basic/index.ts @@ -0,0 +1,5 @@ +Bun.serve({ + fetch(req) { + return new Response("Hello, Bun!"); + }, + }); \ No newline at end of file diff --git a/test/fixtures/bun-basic/package.json b/test/fixtures/bun-basic/package.json new file mode 100644 index 0000000000..f0076f2e3f --- /dev/null +++ b/test/fixtures/bun-basic/package.json @@ -0,0 +1,11 @@ +{ + "name": "bun-basic", + "module": "index.ts", + "type": "module", + "devDependencies": { + "@types/bun": "latest" + }, + "peerDependencies": { + "typescript": "^5.0.0" + } +} \ No newline at end of file diff --git a/test/fixtures/bun-basic/tsconfig.json b/test/fixtures/bun-basic/tsconfig.json new file mode 100644 index 0000000000..238655f2ce --- /dev/null +++ b/test/fixtures/bun-basic/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + // Enable latest features + "lib": ["ESNext", "DOM"], + "target": "ESNext", + "module": "ESNext", + "moduleDetection": "force", + "jsx": "react-jsx", + "allowJs": true, + + // Bundler mode + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "noEmit": true, + + // Best practices + "strict": true, + "skipLibCheck": true, + "noFallthroughCasesInSwitch": true, + + // Some stricter flags (disabled by default) + "noUnusedLocals": false, + "noUnusedParameters": false, + "noPropertyAccessFromIndexSignature": false + } +} From c27000145968136c59785d1e08a58c47a69c25ea Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 30 Oct 2024 10:59:24 -0400 Subject: [PATCH 087/104] explicitly disallow upstash replicas if no upstash regions provided --- deploy.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/deploy.rb b/deploy.rb index 70db2cbcbd..ef28952105 100755 --- a/deploy.rb +++ b/deploy.rb @@ -371,8 +371,10 @@ cmd += " --disable-eviction" end - if (regions = UPSTASH["regions"]) + if (regions = UPSTASH["regions"]) && !regions.empty? 
cmd += " --replica-regions #{regions.join(",")}" + else + cmd += " --no-replicas" end artifact Artifact::UPSTASH_REDIS, { config: UPSTASH, region: APP_REGION, name: db_name } From 8ea62fb5698df04c8b9beb1040eb409506d83d57 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 30 Oct 2024 11:26:12 -0400 Subject: [PATCH 088/104] soft skip extensions in tests so the step still shows up in logs --- deploy.rb | 12 ++++++------ deploy/common.rb | 8 ++++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/deploy.rb b/deploy.rb index ef28952105..e918c67dd4 100755 --- a/deploy.rb +++ b/deploy.rb @@ -319,9 +319,9 @@ end end -if !DEPLOY_ONLY && get_env("SKIP_EXTENSIONS").nil? +if !DEPLOY_ONLY if FLY_PG - in_step Step::FLY_POSTGRES_CREATE do + in_step Step::FLY_POSTGRES_CREATE, skip: !get_env("SKIP_EXTENSIONS").nil? do pg_name = FLY_PG["app_name"] region = APP_REGION @@ -350,7 +350,7 @@ exec_capture("flyctl pg attach #{pg_name} --app #{APP_NAME} -y") end elsif SUPABASE - in_step Step::SUPABASE_POSTGRES do + in_step Step::SUPABASE_POSTGRES, skip: !get_env("SKIP_EXTENSIONS").nil? do cmd = "flyctl ext supabase create --org #{ORG_SLUG} --name #{SUPABASE["db_name"]} --region #{SUPABASE["region"]} --app #{APP_NAME} --yes" artifact Artifact::SUPABASE_POSTGRES, { config: SUPABASE } @@ -360,7 +360,7 @@ end if UPSTASH - in_step Step::UPSTASH_REDIS do + in_step Step::UPSTASH_REDIS, skip: !get_env("SKIP_EXTENSIONS").nil? do db_name = "#{APP_NAME}-redis" cmd = "flyctl redis create --name #{db_name} --org #{ORG_SLUG} --region #{APP_REGION}" @@ -384,7 +384,7 @@ end if TIGRIS - in_step Step::TIGRIS_OBJECT_STORAGE do + in_step Step::TIGRIS_OBJECT_STORAGE, skip: !get_env("SKIP_EXTENSIONS").nil? do cmd = "flyctl ext tigris create --org #{ORG_SLUG} --app #{APP_NAME} --yes" if (name = TIGRIS["name"]) && !name.empty? @@ -410,7 +410,7 @@ end if SENTRY - in_step Step::SENTRY do + in_step Step::SENTRY, skip: !get_env("SKIP_EXTENSIONS").nil? 
do exec_capture("flyctl ext sentry create --app #{APP_NAME} --yes") end end diff --git a/deploy/common.rb b/deploy/common.rb index ec2d57c8dd..c854a1f9a6 100644 --- a/deploy/common.rb +++ b/deploy/common.rb @@ -132,12 +132,16 @@ def exec_capture(cmd, display: nil, log: true) output end -def in_step(step, &block) +def in_step(step, skip: false, &block) old_step = Step.current() Step.set_current(step) event :start ret = begin - yield block + if skip + event :skip + else + yield block + end rescue StandardError => e event :error, { type: :uncaught, message: e } exit 1 From 86705aef7a55c425c446fd61c67d1a973823942a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Wed, 30 Oct 2024 14:03:14 -0400 Subject: [PATCH 089/104] fix python version detection from Pipfile, add django fixture --- scanner/django.go | 37 +---- scanner/python.go | 58 ++++++++ test/deployer/deployer_test.go | 26 ++++ test/fixtures/django-basic/Pipfile | 22 +++ test/fixtures/django-basic/Pipfile.lock | 127 ++++++++++++++++++ test/fixtures/django-basic/manage.py | 22 +++ test/fixtures/django-basic/mysite/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 186 bytes .../__pycache__/settings.cpython-312.pyc | Bin 0 -> 2561 bytes .../mysite/__pycache__/urls.cpython-312.pyc | Bin 0 -> 471 bytes .../mysite/__pycache__/wsgi.cpython-312.pyc | Bin 0 -> 672 bytes test/fixtures/django-basic/mysite/asgi.py | 16 +++ test/fixtures/django-basic/mysite/settings.py | 123 +++++++++++++++++ test/fixtures/django-basic/mysite/urls.py | 7 + test/fixtures/django-basic/mysite/wsgi.py | 16 +++ test/fixtures/django-basic/polls/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 185 bytes .../polls/__pycache__/urls.cpython-312.pyc | Bin 0 -> 367 bytes .../polls/__pycache__/views.cpython-312.pyc | Bin 0 -> 412 bytes test/fixtures/django-basic/polls/admin.py | 3 + test/fixtures/django-basic/polls/apps.py | 6 + .../django-basic/polls/migrations/__init__.py | 0 test/fixtures/django-basic/polls/models.py | 3 + test/fixtures/django-basic/polls/tests.py | 3 + test/fixtures/django-basic/polls/urls.py | 7 + test/fixtures/django-basic/polls/views.py | 5 + 26 files changed, 446 insertions(+), 35 deletions(-) create mode 100644 test/fixtures/django-basic/Pipfile create mode 100644 test/fixtures/django-basic/Pipfile.lock create mode 100755 test/fixtures/django-basic/manage.py create mode 100644 test/fixtures/django-basic/mysite/__init__.py create mode 100644 test/fixtures/django-basic/mysite/__pycache__/__init__.cpython-312.pyc create mode 100644 test/fixtures/django-basic/mysite/__pycache__/settings.cpython-312.pyc create mode 100644 test/fixtures/django-basic/mysite/__pycache__/urls.cpython-312.pyc create mode 100644 test/fixtures/django-basic/mysite/__pycache__/wsgi.cpython-312.pyc create mode 100644 test/fixtures/django-basic/mysite/asgi.py create mode 100644 test/fixtures/django-basic/mysite/settings.py create mode 100644 test/fixtures/django-basic/mysite/urls.py create mode 100644 test/fixtures/django-basic/mysite/wsgi.py create mode 100644 test/fixtures/django-basic/polls/__init__.py create mode 100644 test/fixtures/django-basic/polls/__pycache__/__init__.cpython-312.pyc create mode 100644 test/fixtures/django-basic/polls/__pycache__/urls.cpython-312.pyc create mode 100644 test/fixtures/django-basic/polls/__pycache__/views.cpython-312.pyc create mode 100644 test/fixtures/django-basic/polls/admin.py create mode 100644 test/fixtures/django-basic/polls/apps.py create mode 100644 
test/fixtures/django-basic/polls/migrations/__init__.py create mode 100644 test/fixtures/django-basic/polls/models.py create mode 100644 test/fixtures/django-basic/polls/tests.py create mode 100644 test/fixtures/django-basic/polls/urls.py create mode 100644 test/fixtures/django-basic/polls/views.py diff --git a/scanner/django.go b/scanner/django.go index e45737f7e1..7c26d6046a 100644 --- a/scanner/django.go +++ b/scanner/django.go @@ -4,10 +4,8 @@ import ( "encoding/json" "fmt" "os" - "os/exec" "path" "path/filepath" - "regexp" "strings" "github.com/blang/semver" @@ -298,10 +296,10 @@ For detailed documentation, see https://fly.dev/docs/django/ cmd = []string{"gunicorn", "--bind", ":8000", "--workers", "2", "--worker-class", "uvicorn.workers.UvicornWorker", vars["asgiName"].(string) + ".asgi"} } else if vars["asgiFound"] == true && vars["hasDaphne"] == true { cmd = []string{"daphne", "-b", "0.0.0.0", "-p", "8000", vars["asgiName"].(string) + ".asgi"} - } else if vars["wsgiFound"] == true { + } else if vars["wsgiFound"] == true && vars["hasGunicorn"] == true { cmd = []string{"gunicorn", "--bind", ":8000", "--workers", "2", vars["wsgiName"].(string) + ".wsgi"} } else { - cmd = []string{"python", "manage.py", "runserver"} + cmd = []string{"python", "manage.py", "runserver", "0.0.0.0:8000"} } // Serialize the array to JSON @@ -346,34 +344,3 @@ For detailed documentation, see https://fly.dev/docs/django/ return s, nil } - -func extractPythonVersion() (string, bool, error) { - /* Example Output: - Python 3.11.2 - Python 3.12.0b4 - */ - pythonVersionOutput := "Python 3.12.0" // Fallback to 3.12 - - cmd := exec.Command("python3", "--version") - out, err := cmd.CombinedOutput() - if err == nil { - pythonVersionOutput = string(out) - } else { - cmd := exec.Command("python", "--version") - out, err := cmd.CombinedOutput() - if err == nil { - pythonVersionOutput = string(out) - } - } - - re := regexp.MustCompile(`Python ([0-9]+\.[0-9]+\.[0-9]+(?:[a-zA-Z]+[0-9]+)?)`) - match := re.FindStringSubmatch(pythonVersionOutput) - - if len(match) > 1 { - version := match[1] - nonNumericRegex := regexp.MustCompile(`[^0-9.]`) - pinned := nonNumericRegex.MatchString(version) - return version, pinned, nil - } - return "", false, fmt.Errorf("Could not find Python version") -} diff --git a/scanner/python.go b/scanner/python.go index addfcfeef6..e4099ea44d 100644 --- a/scanner/python.go +++ b/scanner/python.go @@ -2,8 +2,12 @@ package scanner import ( "bufio" + "encoding/json" + "fmt" "os" + "os/exec" "path/filepath" + "regexp" "slices" "strings" "unicode" @@ -52,6 +56,17 @@ type PyProjectToml struct { type Pipfile struct { Packages map[string]interface{} + Requires PipfileRequires `json:"requires" toml:"requires"` +} + +type PipfileRequires struct { + PythonVersion string `json:"python_version" toml:"python_version"` +} + +type PipfileLock struct { + Meta struct { + Requires PipfileRequires `json:"requires" toml:"requires"` + } `json:"_meta"` } type PyCfg struct { @@ -279,10 +294,12 @@ func configPipfile(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { dep := parsePyDep(dep) depList = append(depList, dep) } + pyVersion, _, err := extractPythonVersion() if err != nil { return nil, err } + appName := filepath.Base(sourceDir) cfg := PyCfg{pyVersion, appName, depList, Pipenv} return intoSource(cfg) @@ -365,3 +382,44 @@ func configurePython(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { return s, nil } + +func extractPythonVersion() (string, bool, error) { + var pipfileLock PipfileLock + contents, 
err := os.ReadFile("Pipfile.lock") + if err == nil { + if err := json.Unmarshal(contents, &pipfileLock); err == nil { + if pyVersion := pipfileLock.Meta.Requires.PythonVersion; pyVersion != "" { + return pyVersion, true, nil + } + } + } + + /* Example Output: + Python 3.11.2 + Python 3.12.0b4 + */ + pythonVersionOutput := "Python 3.12.0" // Fallback to 3.12 + + cmd := exec.Command("python3", "--version") + out, err := cmd.CombinedOutput() + if err == nil { + pythonVersionOutput = string(out) + } else { + cmd := exec.Command("python", "--version") + out, err := cmd.CombinedOutput() + if err == nil { + pythonVersionOutput = string(out) + } + } + + re := regexp.MustCompile(`Python ([0-9]+\.[0-9]+\.[0-9]+(?:[a-zA-Z]+[0-9]+)?)`) + match := re.FindStringSubmatch(pythonVersionOutput) + + if len(match) > 1 { + version := match[1] + nonNumericRegex := regexp.MustCompile(`[^0-9.]`) + pinned := nonNumericRegex.MatchString(version) + return version, pinned, nil + } + return "", false, fmt.Errorf("Could not find Python version") +} diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 39b1f0728a..46a3264136 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -268,6 +268,32 @@ func TestLaunchRails8(t *testing.T) { require.NoError(t, err) } +func TestLaunchDjangoBasic(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("django-basic"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, "python", manifest.Plan.Runtime.Language) + require.Equal(t, "3.11", manifest.Plan.Runtime.Version) + require.Equal(t, "Django", manifest.Plan.ScannerFamily) + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/polls/", appName)) + require.NoError(t, err) + require.Contains(t, string(body), "Hello, world. 
You're at the polls index.") +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/django-basic/Pipfile b/test/fixtures/django-basic/Pipfile new file mode 100644 index 0000000000..069c9b3721 --- /dev/null +++ b/test/fixtures/django-basic/Pipfile @@ -0,0 +1,22 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +asgiref = "==3.7.2" +certifi = "==2023.11.17" +distlib = "==0.3.8" +django = "==4.2.11" +filelock = "==3.13.1" +pipenv = "==2023.2.4" +platformdirs = "==4.2.0" +sqlparse = "==0.4.4" +typing-extensions = "==4.10.0" +virtualenv = "==20.25.1" +virtualenv-clone = "==0.5.7" + +[dev-packages] + +[requires] +python_version = "3.11" diff --git a/test/fixtures/django-basic/Pipfile.lock b/test/fixtures/django-basic/Pipfile.lock new file mode 100644 index 0000000000..a1d782f4c9 --- /dev/null +++ b/test/fixtures/django-basic/Pipfile.lock @@ -0,0 +1,127 @@ +{ + "_meta": { + "hash": { + "sha256": "7c922db9a0297f182ab81d25ab35c87fb8905c26e97b442d1bf73e34d1fcd718" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.11" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "asgiref": { + "hashes": [ + "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e", + "sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.7.2" + }, + "certifi": { + "hashes": [ + "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", + "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==2023.11.17" + }, + "distlib": { + "hashes": [ + "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784", + "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64" + ], + "index": "pypi", + "version": "==0.3.8" + }, + "django": { + "hashes": [ + "sha256:6e6ff3db2d8dd0c986b4eec8554c8e4f919b5c1ff62a5b4390c17aff2ed6e5c4", + "sha256:ddc24a0a8280a0430baa37aff11f28574720af05888c62b7cfe71d219f4599d3" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==4.2.11" + }, + "filelock": { + "hashes": [ + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==3.13.1" + }, + "pipenv": { + "hashes": [ + "sha256:18a3eba519e36d59f0d5a7f9c42bd268521e4b9b7b3d1bd6adcf131569323275", + "sha256:dd62abe8efa34b3d13e47b226bd151a1110dc5591557c559beca7d52efb55c18" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==2023.2.4" + }, + "platformdirs": { + "hashes": [ + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==4.2.0" + }, + "setuptools": { + "hashes": [ + "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd", + "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686" + ], + "markers": "python_version >= '3.8'", + "version": "==75.3.0" + }, + "sqlparse": { + "hashes": [ + 
"sha256:5430a4fe2ac7d0f93e66f1efc6e1338a41884b7ddf2a350cedd20ccc4d9d28f3", + "sha256:d446183e84b8349fa3061f0fe7f06ca94ba65b426946ffebe6e3e8295332420c" + ], + "index": "pypi", + "markers": "python_version >= '3.5'", + "version": "==0.4.4" + }, + "typing-extensions": { + "hashes": [ + "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475", + "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==4.10.0" + }, + "virtualenv": { + "hashes": [ + "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a", + "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==20.25.1" + }, + "virtualenv-clone": { + "hashes": [ + "sha256:418ee935c36152f8f153c79824bb93eaf6f0f7984bae31d3f48f350b9183501a", + "sha256:44d5263bceed0bac3e1424d64f798095233b64def1c5689afa43dc3223caf5b0" + ], + "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==0.5.7" + } + }, + "develop": {} +} diff --git a/test/fixtures/django-basic/manage.py b/test/fixtures/django-basic/manage.py new file mode 100755 index 0000000000..2087a6195c --- /dev/null +++ b/test/fixtures/django-basic/manage.py @@ -0,0 +1,22 @@ +#!/nix/store/fmwqa8nvva4sh18bqayzrilrzxq9fm0f-python3-3.12.2/bin/python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings') + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/test/fixtures/django-basic/mysite/__init__.py b/test/fixtures/django-basic/mysite/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/django-basic/mysite/__pycache__/__init__.cpython-312.pyc b/test/fixtures/django-basic/mysite/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47d6f3049c0f4e06f629d9499687eb0afa02dc59 GIT binary patch literal 186 zcmX@j%ge<81lLlP(n0iN5P=Rpvj9b=GgLBYGWxA#C}INgK7-W!iqy}@&rQ|ON-Y9W z#YM^b>6s-NrAd0p`MLVVr3I-)X*repK%88XqhFF*T%wx1R z2hfxqWf+uv=}}o}3QeOFNKKU#pzw5Qp~k2a|_ogABn7D16vBkh^uwE>u8{|s32W6 zI6@VRjwG*h9ReWS2=f;IIlr_7>MJg$X<<$^0A5$9Y8o7TX-frzs)lcXeH>{~+9qFT zGRUkGerV*tg}SL{u#o}83^m(oon#Ii&J5vc3v-Q*rg54|C}%c~E~FR;3}Myco-0}h zlmBDO!i_OQf4MnLvyN|Fp)GrP6b98GxZbhwVvBBHy<^Xt&+k9^!v|1wZ;#kYM+^;cEIAr38Jfl z4NALhOzhbSCc%V2n2$4bU?@${xGXJyqJ}%kG6? 
zvv$a{Z%u-Y%=2UV%aCtN~P>rc0o`cvoz{`(b6(Q&EmRy`Thiw@h2?^qhK zGb0d+^|l6`#@9d_8|a`B_}rlBDkC;=QiJTjpfa>b9egq^lJ?o3PQo0Q;t{TOn)duD zBrW_58iv{nBiX|mbpv_=vKwgo=7jKJ5KmAz;H7;|a9PMc6pJ~>m&#BQSkb6<6h87ygIRXdjsS7APkWqzSX|8) zMaNeZ);&5|A~m&Uc_Fk;NS58P;?gZlOzk;# z0&SQm26aNqLRoYZ?L^9Ac0;Vl55>ohKPN6%SDm;}D3mtEocy3vu9TfbzF4jR2?$Uf zWhb(h&*ci@rm!J8v5iuxB3CyG*-~-E30K6m^@30l%T98$yqb6A3;C?z7LpUr2^GOL zTXyDzYUP2vE|klgrH!2YNGRliS%UhD7lh(!Rah0}Y$+!KV17-Mr4p1(s0t3}m%b`G z0e4rC!1D?qXI<+~DkrW8)j~xEA3}E_Asr3hv#x<9 z_umSEKQBz*%d+ggC&K#vnPb@V`vE4F{4w;yy|>ZJJJHL(N3Xn%&hJF$-$s{qqD#F{ zKfxq6*j}U`V8Zd9e=nKp#otAODNir)E)`06fHT7+FZE*m5HlmPyUEn<%-mj#d&l^% zB>LXqOk_VC^9TCgMBLL0_c>-}@ePv%RDWVO#qTEPe!07oyuR=ECPBtOkh#Ewr`{x` zzSjeKp}}E&{P5lQVPkkW8~@1jhMDVMW2WbN@x9n|FT59>>V@{e=|DfmBxb?U2y^a> L9VTU6g;@LxX9ZP$ literal 0 HcmV?d00001 diff --git a/test/fixtures/django-basic/mysite/__pycache__/urls.cpython-312.pyc b/test/fixtures/django-basic/mysite/__pycache__/urls.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3ce35ff6b43a0980488e315e88ff1e009b321fc GIT binary patch literal 471 zcmY*VJ#WG=5VaF0P$=!tg|TW?sTqnVR0*N-3)-zqHi%6~B*NB>u}t(YAbv_m zrbendbm~UM##B3mp(pv?-MhQ@B)_*>4WQz2eC4kce(UBKc2(@As#pR72xK6FE`+)^ zm=T$-86g)zU;v7Usj^AX8XoS}#sSNjMPXPvRhkuqr~iB19q)X@t>v~;`_ZHW3u{QC zAa;zR7Q`OQ2rW=Ll#`jACX5L@Yn0f@I1@9w+{I5$y*MM`F!qyBW^0SqFIr8 znIpw;OVdaMQpswzXsWFACFQYLft?zI{WG2<5yc_p8U**SAIM2IQsNO7SxWhsLpE+m(-PGN4DFoyX}DZ z3H%1MpTcDbG^CtBhyyoJZ#&^MtF|ZE?|XiJ@B5zY&ueQQvUUlZ>3fXOuQFI#?HbI7 z7JNi8>Z2IP4#$0Ly~~}4qygk@Edb59euNvOhUimdPzj$=yCTD> z0c~i)PRvAsrWNznFw`ufmCVJgem=WeeO|r53)QqN_ERCfZMVNJ#`vF8$F=zbguUPB l$q(dw@XoyNX#ERXzu53Ews*gwmp_}EUz?B48tSes`VTwM$Q1wp literal 0 HcmV?d00001 diff --git a/test/fixtures/django-basic/mysite/asgi.py b/test/fixtures/django-basic/mysite/asgi.py new file mode 100644 index 0000000000..cce50dcad1 --- /dev/null +++ b/test/fixtures/django-basic/mysite/asgi.py @@ -0,0 +1,16 @@ +""" +ASGI config for mysite project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/4.2/howto/deployment/asgi/ +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings') + +application = get_asgi_application() diff --git a/test/fixtures/django-basic/mysite/settings.py b/test/fixtures/django-basic/mysite/settings.py new file mode 100644 index 0000000000..70dd730028 --- /dev/null +++ b/test/fixtures/django-basic/mysite/settings.py @@ -0,0 +1,123 @@ +""" +Django settings for mysite project. + +Generated by 'django-admin startproject' using Django 4.2.11. + +For more information on this file, see +https://docs.djangoproject.com/en/4.2/topics/settings/ + +For the full list of settings and their values, see +https://docs.djangoproject.com/en/4.2/ref/settings/ +""" + +from pathlib import Path + +# Build paths inside the project like this: BASE_DIR / 'subdir'. +BASE_DIR = Path(__file__).resolve().parent.parent + + +# Quick-start development settings - unsuitable for production +# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/ + +# SECURITY WARNING: keep the secret key used in production secret! +SECRET_KEY = 'django-insecure-htw$9z*ov@^%6+x_g0=1pl^hgf+#+9aut^_eyq^hc3s++*&*$)' + +# SECURITY WARNING: don't run with debug turned on in production! 
+DEBUG = True + +ALLOWED_HOSTS = ['*'] + + +# Application definition + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +ROOT_URLCONF = 'mysite.urls' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +WSGI_APPLICATION = 'mysite.wsgi.application' + + +# Database +# https://docs.djangoproject.com/en/4.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': BASE_DIR / 'db.sqlite3', + } +} + + +# Password validation +# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators + +AUTH_PASSWORD_VALIDATORS = [ + { + 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', + }, + { + 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', + }, +] + + +# Internationalization +# https://docs.djangoproject.com/en/4.2/topics/i18n/ + +LANGUAGE_CODE = 'en-us' + +TIME_ZONE = 'UTC' + +USE_I18N = True + +USE_TZ = True + + +# Static files (CSS, JavaScript, Images) +# https://docs.djangoproject.com/en/4.2/howto/static-files/ + +STATIC_URL = 'static/' + +# Default primary key field type +# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field + +DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' diff --git a/test/fixtures/django-basic/mysite/urls.py b/test/fixtures/django-basic/mysite/urls.py new file mode 100644 index 0000000000..dc087204dd --- /dev/null +++ b/test/fixtures/django-basic/mysite/urls.py @@ -0,0 +1,7 @@ +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path("polls/", include("polls.urls")), + path("admin/", admin.site.urls), +] \ No newline at end of file diff --git a/test/fixtures/django-basic/mysite/wsgi.py b/test/fixtures/django-basic/mysite/wsgi.py new file mode 100644 index 0000000000..4ce2cc5dcd --- /dev/null +++ b/test/fixtures/django-basic/mysite/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for mysite project. + +It exposes the WSGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/4.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings') + +application = get_wsgi_application() diff --git a/test/fixtures/django-basic/polls/__init__.py b/test/fixtures/django-basic/polls/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/django-basic/polls/__pycache__/__init__.cpython-312.pyc b/test/fixtures/django-basic/polls/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f9ae50a5b107fb07a9585d73c1a484840cc14fc GIT binary patch literal 185 zcmX@j%ge<81ZinX=^*+sh(HIQS%4zb87dhx8U0o=6fpsLpFwJVMd)Yb=cejsr51sx z;-X~z^vsfs(j>j){9OIw(t^~Yw46$PAWkmH(Jx6YF40fRtSBihN-fq;$x6&i&(}># zEY3{UFUZfyDb|mV&&C5r~UHjE~HWjEqIhKo$Tm CyD=mH literal 0 HcmV?d00001 diff --git a/test/fixtures/django-basic/polls/__pycache__/urls.cpython-312.pyc b/test/fixtures/django-basic/polls/__pycache__/urls.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73cc543eeffbbdcf77a0eca091fcfda62eb58611 GIT binary patch literal 367 zcmYLDu};G<5Ix698Yl$`RwjgmIuQQ=VrF83C7sgPNfX&gWIMEFV=EIQ{RISn!panh z2{Cjl>edNcrQ)P}@7|q#@7{a8T`+NMH~c*5CZB@%pHLv zjKBegsLOB>Y(M@p_1^qfywMqoylJN&EUfIK)~#bbTA+>a?s7I(Ewo--AJa$#_oI=t2`lf#knHZI;^Mb;6hZi;p|fG8xwoFIyJDw l2%%3n{)VF;AH1E};q3Nl@Hm){Kisn=b`G3}uG}*Ze*r(jU7-K~ literal 0 HcmV?d00001 diff --git a/test/fixtures/django-basic/polls/__pycache__/views.cpython-312.pyc b/test/fixtures/django-basic/polls/__pycache__/views.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f7e8ebf2038c4478d72067e7d410bedfe99b162 GIT binary patch literal 412 zcmX|7u};G<5Vey+3vC&pqlJ(l62w1%7$`%h4j=})weht{<1(1xAc^2yk!ZqT=1Xho;jAj*HU*`XwISjj+W;&IiC>!@DBXKh5t031y!E literal 0 HcmV?d00001 diff --git a/test/fixtures/django-basic/polls/admin.py b/test/fixtures/django-basic/polls/admin.py new file mode 100644 index 0000000000..8c38f3f3da --- /dev/null +++ b/test/fixtures/django-basic/polls/admin.py @@ -0,0 +1,3 @@ +from django.contrib import admin + +# Register your models here. diff --git a/test/fixtures/django-basic/polls/apps.py b/test/fixtures/django-basic/polls/apps.py new file mode 100644 index 0000000000..5a5f94ca17 --- /dev/null +++ b/test/fixtures/django-basic/polls/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class PollsConfig(AppConfig): + default_auto_field = 'django.db.models.BigAutoField' + name = 'polls' diff --git a/test/fixtures/django-basic/polls/migrations/__init__.py b/test/fixtures/django-basic/polls/migrations/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/fixtures/django-basic/polls/models.py b/test/fixtures/django-basic/polls/models.py new file mode 100644 index 0000000000..71a8362390 --- /dev/null +++ b/test/fixtures/django-basic/polls/models.py @@ -0,0 +1,3 @@ +from django.db import models + +# Create your models here. diff --git a/test/fixtures/django-basic/polls/tests.py b/test/fixtures/django-basic/polls/tests.py new file mode 100644 index 0000000000..7ce503c2dd --- /dev/null +++ b/test/fixtures/django-basic/polls/tests.py @@ -0,0 +1,3 @@ +from django.test import TestCase + +# Create your tests here. 
diff --git a/test/fixtures/django-basic/polls/urls.py b/test/fixtures/django-basic/polls/urls.py new file mode 100644 index 0000000000..a9d7f56e7e --- /dev/null +++ b/test/fixtures/django-basic/polls/urls.py @@ -0,0 +1,7 @@ +from django.urls import path + +from . import views + +urlpatterns = [ + path("", views.index, name="index"), +] \ No newline at end of file diff --git a/test/fixtures/django-basic/polls/views.py b/test/fixtures/django-basic/polls/views.py new file mode 100644 index 0000000000..94bc318991 --- /dev/null +++ b/test/fixtures/django-basic/polls/views.py @@ -0,0 +1,5 @@ +from django.http import HttpResponse + + +def index(request): + return HttpResponse("Hello, world. You're at the polls index.") \ No newline at end of file From 735eb3d1274811f191f3fbf60fc4227288c6a953 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Thu, 31 Oct 2024 09:55:22 -0400 Subject: [PATCH 090/104] query from slug directly if supplied --- internal/prompt/prompt.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/prompt/prompt.go b/internal/prompt/prompt.go index 6ac49d51f3..0e946d50a6 100644 --- a/internal/prompt/prompt.go +++ b/internal/prompt/prompt.go @@ -242,6 +242,11 @@ var errOrgSlugRequired = NonInteractiveError("org slug must be specified when no func Org(ctx context.Context) (*fly.Organization, error) { client := flyutil.ClientFromContext(ctx) + slug := config.FromContext(ctx).Organization + if slug != "" { + return client.GetOrganizationBySlug(ctx, slug) + } + orgs, err := client.GetOrganizations(ctx) if err != nil { return nil, err @@ -249,7 +254,6 @@ func Org(ctx context.Context) (*fly.Organization, error) { sort.OrganizationsByTypeAndName(orgs) io := iostreams.FromContext(ctx) - slug := config.FromContext(ctx).Organization switch { case slug == "" && len(orgs) == 1 && orgs[0].Type == "PERSONAL": From 6de1bfb277fb5f9f4c6087cbb8e132ed1fc332fb Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Thu, 31 Oct 2024 11:59:28 -0400 Subject: [PATCH 091/104] don't prompt for bucket name on tigris if using the --yes flag --- internal/command/extensions/core/core.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/command/extensions/core/core.go b/internal/command/extensions/core/core.go index 7fc3fb8829..fa267890c6 100644 --- a/internal/command/extensions/core/core.go +++ b/internal/command/extensions/core/core.go @@ -102,17 +102,17 @@ func ProvisionExtension(ctx context.Context, params ExtensionParams) (extension if override := params.OverrideName; override != nil { name = *override } else { - if name == "" { - name = flag.GetString(ctx, "name") - } + name = flag.GetString(ctx, "name") if name == "" { if provider.NameSuffix != "" && targetApp.Name != "" { name = targetApp.Name + "-" + provider.NameSuffix } - err = prompt.String(ctx, &name, "Choose a name, use the default, or leave blank to generate one:", name, false) - if err != nil { - return + if !flag.GetYes(ctx) { + err = prompt.String(ctx, &name, "Choose a name, use the default, or leave blank to generate one:", name, false) + if err != nil { + return + } } } } From 64878e368241c31dc8347c6f594cf983e6c1f14a Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 1 Nov 2024 12:47:40 -0400 Subject: [PATCH 092/104] fix go deployments when missing go.sum --- scanner/go.go | 30 +++++++++++++++++------------- scanner/templates/go/Dockerfile | 4 ++++ test/deployer/deployer_test.go | 25 +++++++++++++++++++++++++ test/fixtures/go-no-go-sum/go.mod | 3 +++ 
test/fixtures/go-no-go-sum/main.go | 21 +++++++++++++++++++++ 5 files changed, 70 insertions(+), 13 deletions(-) create mode 100644 test/fixtures/go-no-go-sum/go.mod create mode 100644 test/fixtures/go-no-go-sum/main.go diff --git a/scanner/go.go b/scanner/go.go index 51073644d5..51ed104758 100644 --- a/scanner/go.go +++ b/scanner/go.go @@ -14,19 +14,13 @@ func configureGo(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { return nil, nil } - s := &SourceInfo{ - Files: templates("templates/go"), - Family: "Go", - Port: 8080, - Env: map[string]string{ - "PORT": "8080", - }, - Runtime: plan.RuntimeStruct{Language: "go"}, - } + vars := make(map[string]interface{}) + + var skipDeploy bool if !absFileExists("go.sum") { - s.SkipDeploy = true - terminal.Warn("no go.sum file found, please adjust your Dockerfile to remove references to go.sum") + vars["skipGoSum"] = true + skipDeploy = true } gomod, parseErr := parseModfile() @@ -38,8 +32,18 @@ func configureGo(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { version = gomod.Go.Version } - s.BuildArgs = map[string]string{ - "GO_VERSION": version, + s := &SourceInfo{ + Files: templatesExecute("templates/go", vars), + Family: "Go", + Port: 8080, + Env: map[string]string{ + "PORT": "8080", + }, + Runtime: plan.RuntimeStruct{Language: "go", Version: version}, + BuildArgs: map[string]string{ + "GO_VERSION": version, + }, + SkipDeploy: skipDeploy, } return s, nil diff --git a/scanner/templates/go/Dockerfile b/scanner/templates/go/Dockerfile index fa878e19b1..ac09cf1cfb 100644 --- a/scanner/templates/go/Dockerfile +++ b/scanner/templates/go/Dockerfile @@ -2,7 +2,11 @@ ARG GO_VERSION=1 FROM golang:${GO_VERSION}-bookworm as builder WORKDIR /usr/src/app +{{ if .skipGoSum -}} +COPY go.mod ./ +{{ else -}} COPY go.mod go.sum ./ +{{ end -}} RUN go mod download && go mod verify COPY . . RUN go build -v -o /run-app . diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 46a3264136..915ba9fcc9 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -294,6 +294,31 @@ func TestLaunchDjangoBasic(t *testing.T) { require.Contains(t, string(body), "Hello, world. 
You're at the polls index.") } +func TestLaunchGoNoGoSum(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("go-no-go-sum"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, "go", manifest.Plan.Runtime.Language) + require.Equal(t, "1.22.6", manifest.Plan.Runtime.Version) + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/", appName)) + require.NoError(t, err) + require.Contains(t, string(body), "Hello from Go!") +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/go-no-go-sum/go.mod b/test/fixtures/go-no-go-sum/go.mod new file mode 100644 index 0000000000..84b7465c91 --- /dev/null +++ b/test/fixtures/go-no-go-sum/go.mod @@ -0,0 +1,3 @@ +module example.com/m + +go 1.22.6 diff --git a/test/fixtures/go-no-go-sum/main.go b/test/fixtures/go-no-go-sum/main.go new file mode 100644 index 0000000000..335db45b58 --- /dev/null +++ b/test/fixtures/go-no-go-sum/main.go @@ -0,0 +1,21 @@ +package main + +import ( + "log" + "net/http" + "os" +) + +func main() { + port := os.Getenv("PORT") + if port == "" { + port = "8080" + + } + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Hello from Go!")) + }) + + log.Println("listening on", port) + log.Fatal(http.ListenAndServe(":"+port, nil)) +} From ec683eb47e29e255da6040d90fc6351dea5e1827 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Fri, 1 Nov 2024 16:07:37 -0400 Subject: [PATCH 093/104] upgrade default deno to 2.0.4, try to detect more deno apps, add test for extremely minimal deno app --- scanner/deno.go | 20 +++++++++++++++++--- scanner/templates/deno/Dockerfile | 4 ++-- test/deployer/deployer_test.go | 24 ++++++++++++++++++++++++ test/fixtures/deno-no-config/index.ts | 3 +++ 4 files changed, 46 insertions(+), 5 deletions(-) create mode 100644 test/fixtures/deno-no-config/index.ts diff --git a/scanner/deno.go b/scanner/deno.go index 441ee982cb..be4359d9ed 100644 --- a/scanner/deno.go +++ b/scanner/deno.go @@ -1,6 +1,11 @@ package scanner -import "github.com/superfly/flyctl/internal/command/launch/plan" +import ( + "fmt" + "path/filepath" + + "github.com/superfly/flyctl/internal/command/launch/plan" +) func configureDeno(sourceDir string, config *ScannerConfig) (*SourceInfo, error) { if !checksPass( @@ -8,17 +13,26 @@ func configureDeno(sourceDir string, config *ScannerConfig) (*SourceInfo, error) // default config files: https://deno.land/manual@v1.35.2/getting_started/configuration_file fileExists("deno.json", "deno.jsonc"), // deno.land and denopkg.com imports - dirContains("*.ts", "\"https?://deno\\.land/.*\"", "\"https?://denopkg\\.com/.*\""), + dirContains("*.ts", `"https?://deno\.land/.*"`, `"https?://denopkg\.com/.*"`, `import "(.*)\.tsx{0,}"`, `from "npm:.*"`, `from "jsr:.*"`, `Deno\.serve\(.*`, `Deno\.listen\(.*`), ) { return nil, nil } + var entrypoint string + + for _, path := range []string{"index.ts", "app.ts", "server.ts"} { + if absFileExists(filepath.Join(sourceDir, path)) { + entrypoint = path + break + } + } + s := &SourceInfo{ Files: templates("templates/deno"), Family: "Deno", Port: 8080, Processes: map[string]string{ - "app": "run --allow-net ./example.ts", + "app": fmt.Sprintf("run -A 
./%s", entrypoint), }, Env: map[string]string{ "PORT": "8080", diff --git a/scanner/templates/deno/Dockerfile b/scanner/templates/deno/Dockerfile index fc4e5e621e..ab4b6e5b63 100644 --- a/scanner/templates/deno/Dockerfile +++ b/scanner/templates/deno/Dockerfile @@ -1,6 +1,6 @@ # Based on https://github.com/denoland/deno_docker/blob/main/alpine.dockerfile -ARG DENO_VERSION=1.14.0 +ARG DENO_VERSION=2.0.4 ARG BIN_IMAGE=denoland/deno:bin-${DENO_VERSION} FROM ${BIN_IMAGE} AS bin @@ -24,4 +24,4 @@ WORKDIR /deno-dir COPY . . ENTRYPOINT ["/bin/deno"] -CMD ["run", "--allow-net", "https://deno.land/std/examples/echo_server.ts"] +CMD ["run", "https://deno.land/std/examples/echo_server.ts"] diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 915ba9fcc9..cf65f8a8a3 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -319,6 +319,30 @@ func TestLaunchGoNoGoSum(t *testing.T) { require.Contains(t, string(body), "Hello from Go!") } +func TestLaunchDenoNoConfig(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deno-no-config"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, "deno", manifest.Plan.Runtime.Language) + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/", appName)) + require.NoError(t, err) + require.Contains(t, string(body), "Hello, World!") +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/deno-no-config/index.ts b/test/fixtures/deno-no-config/index.ts new file mode 100644 index 0000000000..7f3a0fb466 --- /dev/null +++ b/test/fixtures/deno-no-config/index.ts @@ -0,0 +1,3 @@ +Deno.serve({ port: 8080, hostname: "0.0.0.0" }, (_req) => { + return new Response("Hello, World!"); +}); \ No newline at end of file From ec2751c8f9707a64f82d6db0ae4b1309db44b70e Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 5 Nov 2024 13:25:22 -0500 Subject: [PATCH 094/104] attempt at accepting customize from file (#4046) * attempt at accepting customize from file * add a test for pre-customize.json * customize json is just a map of data --- deploy.rb | 18 +++++-- internal/command/launch/launch.go | 18 +++++++ internal/command/launch/sessions.go | 76 ++++++++++++++++++----------- test/deployer/deployer_test.go | 29 +++++++++++ test/testlib/deployer.go | 22 +++++++++ 5 files changed, 130 insertions(+), 33 deletions(-) diff --git a/deploy.rb b/deploy.rb index e918c67dd4..9d851e280e 100755 --- a/deploy.rb +++ b/deploy.rb @@ -18,7 +18,8 @@ Dir.chdir("/usr/src/app") DEPLOY_NOW = !get_env("DEPLOY_NOW").nil? -DEPLOY_CUSTOMIZE = !get_env("NO_DEPLOY_CUSTOMIZE") +DEPLOY_CUSTOMIZE_PATH = get_env("DEPLOY_CUSTOMIZE_PATH") +DEPLOY_CUSTOMIZE = !get_env("NO_DEPLOY_CUSTOMIZE") || !DEPLOY_CUSTOMIZE_PATH.nil? DEPLOY_ONLY = !get_env("DEPLOY_ONLY").nil? CREATE_AND_PUSH_BRANCH = !get_env("DEPLOY_CREATE_AND_PUSH_BRANCH").nil? FLYIO_BRANCH_NAME = "flyio-new-files" @@ -239,15 +240,22 @@ if DEPLOY_CUSTOMIZE manifest = in_step Step::CUSTOMIZE do - cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH} --from-manifest #{MANIFEST_PATH}" + if DEPLOY_CUSTOMIZE_PATH.nil? 
+ cmd = "flyctl launch sessions create --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH} --from-manifest #{MANIFEST_PATH}" - exec_capture(cmd) - session = JSON.parse(File.read("/tmp/session.json")) + exec_capture(cmd) + session = JSON.parse(File.read("/tmp/session.json")) - artifact Artifact::SESSION, session + artifact Artifact::SESSION, session + end cmd = "flyctl launch sessions finalize --session-path /tmp/session.json --manifest-path #{MANIFEST_PATH}" + if !DEPLOY_CUSTOMIZE_PATH.nil? + cmd += " --from-file #{DEPLOY_CUSTOMIZE_PATH}" + artifact Artifact::SESSION, JSON.parse(File.read(DEPLOY_CUSTOMIZE_PATH)) + end + exec_capture(cmd) manifest = JSON.parse(File.read("/tmp/manifest.json")) diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index 21827b619c..ed8c4982d3 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -185,6 +185,24 @@ func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appco appConfig.HTTPService = nil } appConfig.Compute = plan.Compute + + if plan.CPUKind != "" { + for _, c := range appConfig.Compute { + c.CPUKind = plan.CPUKind + } + } + + if plan.CPUs != 0 { + for _, c := range appConfig.Compute { + c.CPUs = plan.CPUs + } + } + + if plan.MemoryMB != 0 { + for _, c := range appConfig.Compute { + c.MemoryMB = plan.MemoryMB + } + } } // createApp creates the fly.io app for the plan diff --git a/internal/command/launch/sessions.go b/internal/command/launch/sessions.go index b74b26279a..a6c9ede18e 100644 --- a/internal/command/launch/sessions.go +++ b/internal/command/launch/sessions.go @@ -94,6 +94,11 @@ func newSessions() *cobra.Command { Description: "Path to write the manifest info to", Default: "manifest.json", }, + flag.String{ + Name: "from-file", + Description: "Path to a CLI session JSON file", + Default: "", + }, ) // not that useful anywhere else yet @@ -192,14 +197,41 @@ func runSessionFinalize(ctx context.Context) (err error) { io := iostreams.FromContext(ctx) logger := logger.FromContext(ctx) - sessionBytes, err := os.ReadFile(flag.GetString(ctx, "session-path")) - if err != nil { - return err - } + var finalMeta map[string]interface{} - var session fly.CLISession - if err := json.Unmarshal(sessionBytes, &session); err != nil { - return err + if customizePath := flag.GetString(ctx, "from-file"); customizePath != "" { + sessionBytes, err := os.ReadFile(customizePath) + if err != nil { + return err + } + + if err := json.Unmarshal(sessionBytes, &finalMeta); err != nil { + return err + } + } else { + sessionBytes, err := os.ReadFile(flag.GetString(ctx, "session-path")) + if err != nil { + return err + } + + var session fly.CLISession + if err := json.Unmarshal(sessionBytes, &session); err != nil { + return err + } + + // FIXME: better timeout here + ctx, cancel := context.WithTimeout(ctx, 15*time.Minute) + defer cancel() + + finalSession, err := waitForCLISession(ctx, logger, io.ErrOut, session.ID) + switch { + case errors.Is(err, context.DeadlineExceeded): + return errors.New("session expired, please try again") + case err != nil: + return err + } + + finalMeta = finalSession.Metadata } manifestBytes, err := os.ReadFile(flag.GetString(ctx, "manifest-path")) @@ -219,27 +251,8 @@ func runSessionFinalize(ctx context.Context) (err error) { warnedNoCcHa: true, } - // FIXME: better timeout here - ctx, cancel := context.WithTimeout(ctx, 15*time.Minute) - defer cancel() - - finalSession, err := waitForCLISession(ctx, logger, io.ErrOut, session.ID) - switch { - 
case errors.Is(err, context.DeadlineExceeded): - return errors.New("session expired, please try again") - case err != nil: - return err - } - // Hack because somewhere from between UI and here, the numbers get converted to strings - if err := patchNumbers(finalSession.Metadata, "vm_cpus", "vm_memory"); err != nil { - return err - } - - // Wasteful, but gets the job done without uprooting the session types. - // Just round-trip the map[string]interface{} back into json, so we can re-deserialize it into a complete type. - metaJson, err := json.Marshal(finalSession.Metadata) - if err != nil { + if err := patchNumbers(finalMeta, "vm_cpus", "vm_memory"); err != nil { return err } @@ -254,6 +267,13 @@ func runSessionFinalize(ctx context.Context) (err error) { oldPlan := helpers.Clone(state.Plan) + // Wasteful, but gets the job done without uprooting the session types. + // Just round-trip the map[string]interface{} back into json, so we can re-deserialize it into a complete type. + metaJson, err := json.Marshal(finalMeta) + if err != nil { + return err + } + err = json.Unmarshal(metaJson, &state.Plan) if err != nil { return err @@ -262,7 +282,7 @@ func runSessionFinalize(ctx context.Context) (err error) { // Patch in some fields that we keep in the plan that aren't persisted by the UI. // Technically, we should probably just be persisting this, but there's // no clear value to the UI having these fields currently. - if _, ok := finalSession.Metadata["ha"]; !ok { + if _, ok := finalMeta["ha"]; !ok { state.Plan.HighAvailability = oldPlan.HighAvailability } // This should never be changed by the UI!! diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index cf65f8a8a3..0a3f97f734 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -198,6 +198,35 @@ func TestLaunchGoFromRepo(t *testing.T) { require.Contains(t, string(body), "I'm running in the yyz region") } +func TestLaunchPreCustomized(t *testing.T) { + customize := map[string]interface{}{ + "vm_memory": 2048, + } + + deploy := testDeployer(t, + createRandomApp, + testlib.WithRegion("yyz"), + testlib.WithPreCustomize(&customize), + testlib.WithouExtensions, + testlib.DeployNow, + testlib.WithGitRepo("https://github.com/fly-apps/go-example"), + ) + + appName := deploy.Extra["appName"].(string) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, manifest.Plan.Guest().MemoryMB, 2048) + require.Equal(t, manifest.Config.Compute[0].MemoryMB, 2048) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", appName)) + require.NoError(t, err) + + require.Contains(t, string(body), "I'm running in the yyz region") +} + func TestLaunchRails70(t *testing.T) { deploy := testDeployer(t, withFixtureApp("deploy-rails-7.0"), diff --git a/test/testlib/deployer.go b/test/testlib/deployer.go index 22559361db..04637e85fa 100644 --- a/test/testlib/deployer.go +++ b/test/testlib/deployer.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "os" + "path/filepath" "strings" "testing" @@ -94,6 +95,7 @@ type DeployTestRun struct { skipExtensions bool copyConfig bool optOutGha bool + customizePath string deployOnly bool deployNow bool @@ -127,6 +129,22 @@ func WithApp(app string) func(*DeployTestRun) { } } +func WithPreCustomize(customize interface{}) func(*DeployTestRun) { + b, err := json.Marshal(customize) + if err != nil { + panic(err) + } + return func(d *DeployTestRun) { + p := filepath.Join(d.WorkDir(), "customize.json") + if err 
:= os.WriteFile(p, b, 0666); err != nil { + panic(err) + } + dst := "/opt/customize.json" + d.containerBinds = append(d.containerBinds, fmt.Sprintf("%s:%s", p, dst)) + d.customizePath = dst + } +} + func WithGitRepo(repo string) func(*DeployTestRun) { return func(d *DeployTestRun) { d.gitRepo = repo @@ -237,6 +255,10 @@ func (d *DeployTestRun) Start(ctx context.Context) error { env = append(env, "DEPLOYER_CLEANUP_BEFORE_EXIT=1") } + if d.customizePath != "" { + env = append(env, fmt.Sprintf("DEPLOY_CUSTOMIZE_PATH=%s", d.customizePath)) + } + fmt.Printf("creating container... image=%s\n", d.deployerImage) cont, err := d.dockerClient.ContainerCreate(ctx, &container.Config{ Image: d.deployerImage, From c5420227e31d3b3c9fd0b769eb4cd2e35ece9b14 Mon Sep 17 00:00:00 2001 From: Jerome Gravel-Niquet Date: Tue, 5 Nov 2024 13:32:33 -0500 Subject: [PATCH 095/104] add static site fixture --- test/deployer/deployer_test.go | 24 ++++++++++++++++++++++++ test/fixtures/static/index.html | 6 ++++++ 2 files changed, 30 insertions(+) create mode 100644 test/fixtures/static/index.html diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index 0a3f97f734..c3fad57fe8 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -372,6 +372,30 @@ func TestLaunchDenoNoConfig(t *testing.T) { require.Contains(t, string(body), "Hello, World!") } +func TestLaunchStatic(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("static"), + createRandomApp, + testlib.WithoutCustomize, + testlib.WithouExtensions, + testlib.DeployNow, + withWorkDirAppSource, + testlib.CleanupBeforeExit, + ) + + manifest, err := deploy.Output().ArtifactManifest() + require.NoError(t, err) + require.NotNil(t, manifest) + + require.Equal(t, "Static", manifest.Plan.ScannerFamily) + + appName := deploy.Extra["appName"].(string) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev/", appName)) + require.NoError(t, err) + require.Contains(t, string(body), "Hello World") +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/static/index.html b/test/fixtures/static/index.html new file mode 100644 index 0000000000..14612b0114 --- /dev/null +++ b/test/fixtures/static/index.html @@ -0,0 +1,6 @@ + + + +Hello World + + \ No newline at end of file From c76b9650b6459a61cef1b8dc627cf6814af4714a Mon Sep 17 00:00:00 2001 From: Lubien Date: Tue, 17 Dec 2024 15:18:42 -0300 Subject: [PATCH 096/104] Deployers phoenix improvements (#4113) * Fix phoenix release env * Support custom tool-versions for phoenix * Always gen files at launch * Test phoenix * Fix lint refer to https://github.com/superfly/flyctl/actions/runs/12236467772/job/34129993067?pr=4113 * Fix phoenix test * this is actually useless * Add TestDeployPhoenixSqlite * Fix phoenix for real * well thats the fix --- deploy.rb | 5 +- deployer.Dockerfile | 2 +- internal/command/launch/launch.go | 9 - scanner/phoenix.go | 28 + scanner/rails.go | 4 - test/deployer/deployer_test.go | 48 ++ .../.dockerignore | 45 ++ .../.formatter.exs | 5 + .../.gitignore | 36 + .../.tool-versions | 2 + .../Dockerfile | 95 +++ .../LICENSE | 201 ++++++ .../README.md | 82 +++ .../assets/css/app.css | 89 +++ .../assets/css/phoenix.css | 101 +++ .../assets/js/app.js | 44 ++ .../assets/vendor/topbar.js | 157 ++++ .../config/config.exs | 51 ++ .../config/dev.exs | 71 ++ .../config/prod.exs | 51 ++ .../config/runtime.exs | 85 +++ .../config/test.exs | 30 + .../entrypoint.sh | 9 
+ .../fly.toml | 55 ++ .../lib/hello_elixir.ex | 9 + .../lib/hello_elixir/application.ex | 36 + .../lib/hello_elixir/mailer.ex | 3 + .../lib/hello_elixir/release.ex | 28 + .../lib/hello_elixir/repo.ex | 5 + .../lib/hello_elixir_web.ex | 102 +++ .../controllers/page_controller.ex | 7 + .../lib/hello_elixir_web/endpoint.ex | 50 ++ .../lib/hello_elixir_web/gettext.ex | 24 + .../lib/hello_elixir_web/router.ex | 55 ++ .../lib/hello_elixir_web/telemetry.ex | 71 ++ .../templates/layout/app.html.heex | 5 + .../templates/layout/live.html.heex | 11 + .../templates/layout/root.html.heex | 30 + .../templates/page/index.html.heex | 41 ++ .../hello_elixir_web/views/error_helpers.ex | 47 ++ .../lib/hello_elixir_web/views/error_view.ex | 16 + .../lib/hello_elixir_web/views/layout_view.ex | 7 + .../lib/hello_elixir_web/views/page_view.ex | 3 + .../mix.exs | 70 ++ .../mix.lock | 38 + .../priv/gettext/en/LC_MESSAGES/errors.po | 112 +++ .../priv/gettext/errors.pot | 95 +++ .../priv/repo/migrations/.formatter.exs | 4 + ...210505214438_create_a_migration_to_run.exs | 9 + .../priv/repo/seeds.exs | 11 + .../priv/static/favicon.ico | Bin 0 -> 1258 bytes .../priv/static/images/phoenix.png | Bin 0 -> 13900 bytes .../priv/static/robots.txt | 5 + .../rel/env.sh.eex | 13 + .../rel/overlays/bin/migrate | 3 + .../rel/overlays/bin/migrate.bat | 1 + .../rel/overlays/bin/server | 3 + .../rel/overlays/bin/server.bat | 2 + .../controllers/page_controller_test.exs | 8 + .../views/error_view_test.exs | 14 + .../views/layout_view_test.exs | 8 + .../hello_elixir_web/views/page_view_test.exs | 3 + .../test/support/channel_case.ex | 36 + .../test/support/conn_case.ex | 39 + .../test/support/data_case.ex | 51 ++ .../test/test_helper.exs | 2 + .../deploy-phoenix-sqlite/.dockerignore | 45 ++ .../deploy-phoenix-sqlite/.formatter.exs | 6 + .../fixtures/deploy-phoenix-sqlite/.gitignore | 41 ++ .../fixtures/deploy-phoenix-sqlite/Dockerfile | 97 +++ test/fixtures/deploy-phoenix-sqlite/README.md | 18 + .../deploy-phoenix-sqlite/assets/css/app.css | 5 + .../deploy-phoenix-sqlite/assets/js/app.js | 44 ++ .../assets/tailwind.config.js | 74 ++ .../assets/vendor/topbar.js | 165 +++++ .../deploy-phoenix-sqlite/config/config.exs | 66 ++ .../deploy-phoenix-sqlite/config/dev.exs | 83 +++ .../deploy-phoenix-sqlite/config/prod.exs | 21 + .../deploy-phoenix-sqlite/config/runtime.exs | 113 +++ .../deploy-phoenix-sqlite/config/test.exs | 34 + test/fixtures/deploy-phoenix-sqlite/fly.toml | 37 + .../lib/deploy_phoenix_sqlite.ex | 9 + .../lib/deploy_phoenix_sqlite/application.ex | 44 ++ .../lib/deploy_phoenix_sqlite/mailer.ex | 3 + .../lib/deploy_phoenix_sqlite/release.ex | 28 + .../lib/deploy_phoenix_sqlite/repo.ex | 5 + .../lib/deploy_phoenix_sqlite_web.ex | 113 +++ .../components/core_components.ex | 676 ++++++++++++++++++ .../components/layouts.ex | 14 + .../components/layouts/app.html.heex | 32 + .../components/layouts/root.html.heex | 17 + .../controllers/error_html.ex | 24 + .../controllers/error_json.ex | 21 + .../controllers/page_controller.ex | 9 + .../controllers/page_html.ex | 10 + .../controllers/page_html/home.html.heex | 222 ++++++ .../lib/deploy_phoenix_sqlite_web/endpoint.ex | 53 ++ .../lib/deploy_phoenix_sqlite_web/gettext.ex | 24 + .../lib/deploy_phoenix_sqlite_web/router.ex | 44 ++ .../deploy_phoenix_sqlite_web/telemetry.ex | 92 +++ test/fixtures/deploy-phoenix-sqlite/mix.exs | 85 +++ test/fixtures/deploy-phoenix-sqlite/mix.lock | 44 ++ .../priv/gettext/en/LC_MESSAGES/errors.po | 112 +++ .../priv/gettext/errors.pot | 109 +++ 
.../priv/repo/migrations/.formatter.exs | 4 + .../deploy-phoenix-sqlite/priv/repo/seeds.exs | 11 + .../priv/static/favicon.ico | Bin 0 -> 152 bytes .../priv/static/images/logo.svg | 6 + .../priv/static/robots.txt | 5 + .../deploy-phoenix-sqlite/rel/env.sh.eex | 13 + .../rel/overlays/bin/migrate | 5 + .../rel/overlays/bin/migrate.bat | 1 + .../rel/overlays/bin/server | 5 + .../rel/overlays/bin/server.bat | 2 + .../controllers/error_html_test.exs | 14 + .../controllers/error_json_test.exs | 12 + .../controllers/page_controller_test.exs | 8 + .../test/support/conn_case.ex | 38 + .../test/support/data_case.ex | 58 ++ .../test/test_helper.exs | 2 + 120 files changed, 5189 insertions(+), 16 deletions(-) create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.dockerignore create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.formatter.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.tool-versions create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/Dockerfile create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/LICENSE create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/app.css create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/phoenix.css create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/js/app.js create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/vendor/topbar.js create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/config.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/dev.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/prod.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/runtime.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/test.exs create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/fly.toml create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/application.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/mailer.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/release.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/repo.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/controllers/page_controller.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/endpoint.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/gettext.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/router.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/telemetry.ex create mode 100644 
test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/app.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/live.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/root.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/page/index.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_helpers.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_view.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/layout_view.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/page_view.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.lock create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/en/LC_MESSAGES/errors.po create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/errors.pot create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/.formatter.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/20210505214438_create_a_migration_to_run.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/seeds.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/favicon.ico create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/images/phoenix.png create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/robots.txt create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/env.sh.eex create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate.bat create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server create mode 100755 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server.bat create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/controllers/page_controller_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/error_view_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/layout_view_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/page_view_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/channel_case.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/conn_case.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/data_case.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/test_helper.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/.dockerignore create mode 100644 
test/fixtures/deploy-phoenix-sqlite/.formatter.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/.gitignore create mode 100644 test/fixtures/deploy-phoenix-sqlite/Dockerfile create mode 100644 test/fixtures/deploy-phoenix-sqlite/README.md create mode 100644 test/fixtures/deploy-phoenix-sqlite/assets/css/app.css create mode 100644 test/fixtures/deploy-phoenix-sqlite/assets/js/app.js create mode 100644 test/fixtures/deploy-phoenix-sqlite/assets/tailwind.config.js create mode 100644 test/fixtures/deploy-phoenix-sqlite/assets/vendor/topbar.js create mode 100644 test/fixtures/deploy-phoenix-sqlite/config/config.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/config/dev.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/config/prod.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/config/runtime.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/config/test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/fly.toml create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/application.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/mailer.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/release.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/repo.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/core_components.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/app.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/root.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_html.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_json.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_controller.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html/home.html.heex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/endpoint.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/gettext.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/router.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/telemetry.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/mix.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/mix.lock create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/gettext/en/LC_MESSAGES/errors.po create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/gettext/errors.pot create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/repo/migrations/.formatter.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/repo/seeds.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/static/favicon.ico create mode 100644 
test/fixtures/deploy-phoenix-sqlite/priv/static/images/logo.svg create mode 100644 test/fixtures/deploy-phoenix-sqlite/priv/static/robots.txt create mode 100755 test/fixtures/deploy-phoenix-sqlite/rel/env.sh.eex create mode 100755 test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate create mode 100755 test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate.bat create mode 100755 test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server create mode 100755 test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server.bat create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_html_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_json_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/page_controller_test.exs create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/support/conn_case.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/support/data_case.ex create mode 100644 test/fixtures/deploy-phoenix-sqlite/test/test_helper.exs diff --git a/deploy.rb b/deploy.rb index 9d851e280e..3b6c4d7b4d 100755 --- a/deploy.rb +++ b/deploy.rb @@ -24,6 +24,7 @@ CREATE_AND_PUSH_BRANCH = !get_env("DEPLOY_CREATE_AND_PUSH_BRANCH").nil? FLYIO_BRANCH_NAME = "flyio-new-files" +DEPLOY_TRIGGER = get_env("DEPLOY_TRIGGER") DEPLOYER_FLY_CONFIG_PATH = get_env("DEPLOYER_FLY_CONFIG_PATH") DEPLOYER_SOURCE_CWD = get_env("DEPLOYER_SOURCE_CWD") DEPLOY_APP_NAME = get_env("DEPLOY_APP_NAME") @@ -268,7 +269,7 @@ ORG_SLUG = manifest["plan"]["org"] APP_REGION = manifest["plan"]["region"] - DO_GEN_REQS = !DEPLOY_COPY_CONFIG || !HAS_FLY_CONFIG + DO_GEN_REQS = DEPLOY_TRIGGER == "launch" debug("generate reqs? #{DO_GEN_REQS}") @@ -314,7 +315,7 @@ APP_NAME = DEPLOY_APP_NAME || fly_config["app"] image_ref = in_step Step::BUILD do - image_tag = SecureRandom.hex(16) + image_tag = "deployment-#{SecureRandom.hex(16)}" if (image_ref = fly_config.dig("build","image")&.strip) && !image_ref.nil? && !image_ref.empty? 
info("Skipping build, using image defined in fly config: #{image_ref}") image_ref diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 941598e470..4942e0df97 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -55,10 +55,10 @@ RUN git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.14.0 & asdf install nodejs $DEFAULT_NODE_VERSION && asdf global nodejs $DEFAULT_NODE_VERSION && \ # elixir asdf plugin-add erlang https://github.com/michallepicki/asdf-erlang-prebuilt-ubuntu-20.04.git && \ - echo -e "local.hex\nlocal.rebar" > $HOME/.default-mix-commands && \ asdf plugin add elixir https://github.com/asdf-vm/asdf-elixir.git && \ asdf install erlang $DEFAULT_ERLANG_VERSION && asdf global erlang $DEFAULT_ERLANG_VERSION && \ asdf install elixir $DEFAULT_ELIXIR_VERSION && asdf global elixir $DEFAULT_ELIXIR_VERSION && \ + mix local.hex --force && mix local.rebar --force && \ # bun asdf plugin add bun https://github.com/cometkim/asdf-bun.git && \ asdf install bun $DEFAULT_BUN_VERSION && asdf global bun $DEFAULT_BUN_VERSION diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index ed8c4982d3..52bc26f687 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -231,12 +231,3 @@ func (state *launchState) createApp(ctx context.Context) (*fly.App, error) { return app, nil } - -func (state *launchState) getApp(ctx context.Context) (*fly.App, error) { - apiClient := flyutil.ClientFromContext(ctx) - app, err := apiClient.GetApp(ctx, state.Plan.AppName) - if err != nil { - return nil, err - } - return app, nil -} diff --git a/scanner/phoenix.go b/scanner/phoenix.go index a0d4ebf2a7..6b5ffd2b78 100644 --- a/scanner/phoenix.go +++ b/scanner/phoenix.go @@ -73,6 +73,34 @@ func configurePhoenix(sourceDir string, config *ScannerConfig) (*SourceInfo, err }, } + // This adds support on launch UI for repos with different .tool-versions + deployTrigger := os.Getenv("DEPLOY_TRIGGER") + if deployTrigger == "launch" && helpers.FileExists(filepath.Join(sourceDir, ".tool-versions")) { + cmd := exec.Command("asdf", "install") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + return nil, errors.Wrap(err, "We identified .tool-versions but after running `asdf install` we ran into some errors. Please check that your `asdf install` builds successfully and try again.") + } + + cmd = exec.Command("mix", "local.hex", "--force") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return nil, errors.Wrap(err, "After installing your elixir version with asdf we found an error while running `mix local.hex --force`. Please confirm that running this command works locally and try again.") + } + + cmd = exec.Command("mix", "local.rebar", "--force") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return nil, errors.Wrap(err, "After installing your elixir version with asdf we found an error while running `mix local.rebar --force`. Please confirm that running this command works locally and try again.") + } + } + // We found Phoenix, so check if the project compiles. 
cmd := exec.Command("mix", "do", "deps.get,", "compile") cmd.Stdout = os.Stdout diff --git a/scanner/rails.go b/scanner/rails.go index 0adeeea69b..43554fe4a4 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -444,10 +444,6 @@ The following comand can be used to update your Dockerfile: } } - if srcInfo.DatabaseDesired == DatabaseKindSqlite { - - } - // add HealthCheck (if found) srcInfo.HttpCheckPath = <-healthcheck_channel if srcInfo.HttpCheckPath != "" { diff --git a/test/deployer/deployer_test.go b/test/deployer/deployer_test.go index c3fad57fe8..f90420989c 100644 --- a/test/deployer/deployer_test.go +++ b/test/deployer/deployer_test.go @@ -396,6 +396,54 @@ func TestLaunchStatic(t *testing.T) { require.Contains(t, string(body), "Hello World") } +func TestDeployPhoenixSqlite(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-phoenix-sqlite"), + createRandomApp, + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + testlib.DeployOnly, + testlib.DeployNow, + withWorkDirAppSource, + ) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) + require.NoError(t, err) + + require.Contains(t, string(body), "Phoenix") +} + +func TestDeployPhoenixSqliteWithCustomToolVersions(t *testing.T) { + deploy := testDeployer(t, + withFixtureApp("deploy-phoenix-sqlite-custom-tool-versions"), + createRandomApp, + withOverwrittenConfig(func(d *testlib.DeployTestRun) map[string]any { + return map[string]any{ + "app": d.Extra["appName"], + "region": d.PrimaryRegion(), + "env": map[string]string{ + "TEST_ID": d.ID(), + }, + } + }), + testlib.DeployOnly, + testlib.DeployNow, + withWorkDirAppSource, + ) + + body, err := testlib.RunHealthCheck(fmt.Sprintf("https://%s.fly.dev", deploy.Extra["appName"].(string))) + require.NoError(t, err) + + require.Contains(t, string(body), "Phoenix") +} + func createRandomApp(d *testlib.DeployTestRun) { appName := d.CreateRandomAppName() require.NotEmpty(d, appName) diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.dockerignore b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.dockerignore new file mode 100644 index 0000000000..61a73933c8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.dockerignore @@ -0,0 +1,45 @@ +# This file excludes paths from the Docker build context. +# +# By default, Docker's build context includes all files (and folders) in the +# current directory. Even if a file isn't copied into the container it is still sent to +# the Docker daemon. +# +# There are multiple reasons to exclude files from the build context: +# +# 1. Prevent nested folders from being copied into the container (ex: exclude +# /assets/node_modules when copying /assets) +# 2. Reduce the size of the build context and improve build time (ex. /build, /deps, /doc) +# 3. 
Avoid sending files containing sensitive information +# +# More information on using .dockerignore is available here: +# https://docs.docker.com/engine/reference/builder/#dockerignore-file + +.dockerignore + +# Ignore git, but keep git HEAD and refs to access current commit hash if needed: +# +# $ cat .git/HEAD | awk '{print ".git/"$2}' | xargs cat +# d0b8727759e1e0e7aa3d41707d12376e373d5ecc +.git +!.git/HEAD +!.git/refs + +# Common development/test artifacts +/cover/ +/doc/ +/test/ +/tmp/ +.elixir_ls + +# Mix artifacts +/_build/ +/deps/ +*.ez + +# Generated on crash by the VM +erl_crash.dump + +# Static artifacts - These should be fetched and built inside the Docker image +/assets/node_modules/ +/priv/static/assets/ +/priv/static/cache_manifest.json diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.formatter.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.formatter.exs new file mode 100644 index 0000000000..8a6391c6a6 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.formatter.exs @@ -0,0 +1,5 @@ +[ + import_deps: [:ecto, :phoenix], + inputs: ["*.{ex,exs}", "priv/*/seeds.exs", "{config,lib,test}/**/*.{ex,exs}"], + subdirectories: ["priv/*/migrations"] +] diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore new file mode 100644 index 0000000000..ed6f936b80 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore @@ -0,0 +1,36 @@ +# The directory Mix will write compiled artifacts to. +/_build/ +/.elixir_ls/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where 3rd-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +hello_elixir-*.tar + +# Ignore assets that are produced by build tools. +/priv/static/assets/ + +# Ignore digested assets cache. +/priv/static/cache_manifest.json + +# In case you use Node.js/npm, you want to ignore these. +npm-debug.log +/assets/node_modules/ + +hello_elixir_dev.db* \ No newline at end of file diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.tool-versions b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.tool-versions new file mode 100644 index 0000000000..6dfe7625d7 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.tool-versions @@ -0,0 +1,2 @@ +erlang 24.3.4.9 +elixir 1.12.3-otp-24 diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/Dockerfile b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/Dockerfile new file mode 100644 index 0000000000..fabe7c5c85 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/Dockerfile @@ -0,0 +1,95 @@ +# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian instead of +# Alpine to avoid DNS resolution issues in production. 
+# +# https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=ubuntu +# https://hub.docker.com/_/ubuntu?tab=tags +# +# +# This file is based on these images: +# +# - https://hub.docker.com/r/hexpm/elixir/tags - for the build image +# - https://hub.docker.com/_/debian?tab=tags&page=1&name=bullseye-20210902-slim - for the release image +# - https://pkgs.org/ - resource for finding needed packages +# - Ex: hexpm/elixir:1.13.3-erlang-24.0.5-debian-bullseye-20210902-slim +# +ARG ELIXIR_VERSION=1.13.3 +ARG OTP_VERSION=24.0.5 +ARG DEBIAN_VERSION=bullseye-20210902-slim + +ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" +ARG RUNNER_IMAGE="debian:${DEBIAN_VERSION}" + +FROM ${BUILDER_IMAGE} as builder + +# install build dependencies +RUN apt-get update -y && apt-get install -y build-essential git \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# prepare build dir +WORKDIR /app + +# install hex + rebar +RUN mix local.hex --force && \ + mix local.rebar --force + +# set build ENV +ENV MIX_ENV="prod" + +# install mix dependencies +COPY mix.exs mix.lock ./ +RUN mix deps.get --only $MIX_ENV +RUN mkdir config + +# copy compile-time config files before we compile dependencies +# to ensure any relevant config change will trigger the dependencies +# to be re-compiled. +COPY config/config.exs config/${MIX_ENV}.exs config/ +RUN mix deps.compile + +COPY priv priv + +COPY lib lib + +COPY assets assets + +# compile assets +RUN mix assets.deploy + +# Compile the release +RUN mix compile + +# Changes to config/runtime.exs don't require recompiling the code +COPY config/runtime.exs config/ + +COPY rel rel +RUN mix release + +# start a new build stage so that the final image will only contain +# the compiled release and other runtime necessities +FROM ${RUNNER_IMAGE} + +RUN apt-get update -y && apt-get install -y libstdc++6 openssl libncurses5 locales sqlite3 \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# Set the locale +RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen + +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +WORKDIR "/app" +RUN chown nobody /app + +# set runner ENV +ENV MIX_ENV="prod" + +# Only copy the final release from the build stage +COPY --from=builder --chown=nobody:root /app/_build/${MIX_ENV}/rel/hello_elixir ./ + +USER nobody + +CMD ["/app/bin/server"] +# Appended by flyctl +ENV ECTO_IPV6 true +ENV ERL_AFLAGS "-proto_dist inet6_tcp" diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/LICENSE b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md new file mode 100644 index 0000000000..20a1ae1d48 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md @@ -0,0 +1,82 @@ +# Hello Elixir SQLite! + +Welcome to our Code Server for Phoenix Apps. + +## Development + +Right now this editor is running at ${FLY_CODE_URL}. + +You need to start the development server to see your app running at ${FLY_DEVELOPMENT_URL}. + +```sh +mix phx.server +``` + +## Deploy + +Looks like we're ready to deploy! + +To deploy you just need to run `fly launch --no-deploy`, create your secret key and create a volume. + +Run `fly launch --no-deploy` and make sure to say yes to copy the configuration file +to the new app so you won't have to do anything. + +```sh +$ fly launch --no-deploy +An existing fly.toml file was found for app fly-elixir + +? Would you like to copy its configuration to the new app? Yes +Creating app in /home/coder/project +Scanning source code +Detected a Dockerfile app + +? App Name (leave blank to use an auto-generated name): your-app-name + +? Select organization: Lubien (personal) + +? Select region: gru (São Paulo) + +Created app sqlite-tests in organization personal +Wrote config file fly.toml +Your app is ready. Deploy with `flyctl deploy` +``` + +Let's go create your secret key. Elixir has a mix task that can generate a new +Phoenix key base secret. Let's use that. + +```bash +mix phx.gen.secret +``` + +It generates a long string of random text. Let's store that as a secret for our app. +When we run this command in our project folder, `flyctl` uses the `fly.toml` +file to know which app we are setting the value on. + +```sh +$ fly secrets set SECRET_KEY_BASE= +Secrets are staged for the first deployment +``` + +Now it's time to create a volume for your SQLite database. You will need to run +`fly volumes create database_data --region REGION_NAME`. Pick the same region +you chose on the previous command. + +```sh +$ fly volumes create database_data --size 1 --region gru + ID: vol_1g67340g9y9rydxw + Name: database_data + App: sqlite-tests + Region: gru + Zone: 2824 + Size GB: 1 + Encrypted: true +Created at: 18 Jan 22 11:18 UTC +``` + +Now go for the deploy! + +```sh +$ fly deploy +``` + +... will bring up your app!
\ No newline at end of file diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/app.css b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/app.css new file mode 100644 index 0000000000..24920cf1ae --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/app.css @@ -0,0 +1,89 @@ +/* This file is for your main application CSS */ +@import "./phoenix.css"; + +/* Alerts and form errors used by phx.new */ +.alert { + padding: 15px; + margin-bottom: 20px; + border: 1px solid transparent; + border-radius: 4px; +} +.alert-info { + color: #31708f; + background-color: #d9edf7; + border-color: #bce8f1; +} +.alert-warning { + color: #8a6d3b; + background-color: #fcf8e3; + border-color: #faebcc; +} +.alert-danger { + color: #a94442; + background-color: #f2dede; + border-color: #ebccd1; +} +.alert p { + margin-bottom: 0; +} +.alert:empty { + display: none; +} +.invalid-feedback { + color: #a94442; + display: block; + margin: -1rem 0 2rem; +} + +/* LiveView specific classes for your customization */ +.phx-no-feedback.invalid-feedback, +.phx-no-feedback .invalid-feedback { + display: none; +} + +.phx-click-loading { + opacity: 0.5; + transition: opacity 1s ease-out; +} + +.phx-disconnected{ + cursor: wait; +} +.phx-disconnected *{ + pointer-events: none; +} + +.phx-modal { + opacity: 1!important; + position: fixed; + z-index: 1; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: auto; + background-color: rgb(0,0,0); + background-color: rgba(0,0,0,0.4); +} + +.phx-modal-content { + background-color: #fefefe; + margin: 15vh auto; + padding: 20px; + border: 1px solid #888; + width: 80%; +} + +.phx-modal-close { + color: #aaa; + float: right; + font-size: 28px; + font-weight: bold; +} + +.phx-modal-close:hover, +.phx-modal-close:focus { + color: black; + text-decoration: none; + cursor: pointer; +} diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/phoenix.css b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/phoenix.css new file mode 100644 index 0000000000..0d59050f89 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/css/phoenix.css @@ -0,0 +1,101 @@ +/* Includes some default style for the starter application. + * This can be safely deleted to start fresh. 
+ */ + +/* Milligram v1.4.1 https://milligram.github.io + * Copyright (c) 2020 CJ Patoilo Licensed under the MIT license + */ + +*,*:after,*:before{box-sizing:inherit}html{box-sizing:border-box;font-size:62.5%}body{color:#000000;font-family:'Helvetica Neue', 'Helvetica', 'Arial', sans-serif;font-size:1.6em;font-weight:300;letter-spacing:.01em;line-height:1.6}blockquote{border-left:0.3rem solid #d1d1d1;margin-left:0;margin-right:0;padding:1rem 1.5rem}blockquote *:last-child{margin-bottom:0}.button,button,input[type='button'],input[type='reset'],input[type='submit']{background-color:#0069d9;border:0.1rem solid #0069d9;border-radius:.4rem;color:#fff;cursor:pointer;display:inline-block;font-size:1.1rem;font-weight:700;height:3.8rem;letter-spacing:.1rem;line-height:3.8rem;padding:0 3.0rem;text-align:center;text-decoration:none;text-transform:uppercase;white-space:nowrap}.button:focus,.button:hover,button:focus,button:hover,input[type='button']:focus,input[type='button']:hover,input[type='reset']:focus,input[type='reset']:hover,input[type='submit']:focus,input[type='submit']:hover{background-color:#606c76;border-color:#606c76;color:#fff;outline:0}.button[disabled],button[disabled],input[type='button'][disabled],input[type='reset'][disabled],input[type='submit'][disabled]{cursor:default;opacity:.5}.button[disabled]:focus,.button[disabled]:hover,button[disabled]:focus,button[disabled]:hover,input[type='button'][disabled]:focus,input[type='button'][disabled]:hover,input[type='reset'][disabled]:focus,input[type='reset'][disabled]:hover,input[type='submit'][disabled]:focus,input[type='submit'][disabled]:hover{background-color:#0069d9;border-color:#0069d9}.button.button-outline,button.button-outline,input[type='button'].button-outline,input[type='reset'].button-outline,input[type='submit'].button-outline{background-color:transparent;color:#0069d9}.button.button-outline:focus,.button.button-outline:hover,button.button-outline:focus,button.button-outline:hover,input[type='button'].button-outline:focus,input[type='button'].button-outline:hover,input[type='reset'].button-outline:focus,input[type='reset'].button-outline:hover,input[type='submit'].button-outline:focus,input[type='submit'].button-outline:hover{background-color:transparent;border-color:#606c76;color:#606c76}.button.button-outline[disabled]:focus,.button.button-outline[disabled]:hover,button.button-outline[disabled]:focus,button.button-outline[disabled]:hover,input[type='button'].button-outline[disabled]:focus,input[type='button'].button-outline[disabled]:hover,input[type='reset'].button-outline[disabled]:focus,input[type='reset'].button-outline[disabled]:hover,input[type='submit'].button-outline[disabled]:focus,input[type='submit'].button-outline[disabled]:hover{border-color:inherit;color:#0069d9}.button.button-clear,button.button-clear,input[type='button'].button-clear,input[type='reset'].button-clear,input[type='submit'].button-clear{background-color:transparent;border-color:transparent;color:#0069d9}.button.button-clear:focus,.button.button-clear:hover,button.button-clear:focus,button.button-clear:hover,input[type='button'].button-clear:focus,input[type='button'].button-clear:hover,input[type='reset'].button-clear:focus,input[type='reset'].button-clear:hover,input[type='submit'].button-clear:focus,input[type='submit'].button-clear:hover{background-color:transparent;border-color:transparent;color:#606c76}.button.button-clear[disabled]:focus,.button.button-clear[disabled]:hover,button.button-clear[disabled]:focus,button.button-clear[disabled]
:hover,input[type='button'].button-clear[disabled]:focus,input[type='button'].button-clear[disabled]:hover,input[type='reset'].button-clear[disabled]:focus,input[type='reset'].button-clear[disabled]:hover,input[type='submit'].button-clear[disabled]:focus,input[type='submit'].button-clear[disabled]:hover{color:#0069d9}code{background:#f4f5f6;border-radius:.4rem;font-size:86%;margin:0 .2rem;padding:.2rem .5rem;white-space:nowrap}pre{background:#f4f5f6;border-left:0.3rem solid #0069d9;overflow-y:hidden}pre>code{border-radius:0;display:block;padding:1rem 1.5rem;white-space:pre}hr{border:0;border-top:0.1rem solid #f4f5f6;margin:3.0rem 0}input[type='color'],input[type='date'],input[type='datetime'],input[type='datetime-local'],input[type='email'],input[type='month'],input[type='number'],input[type='password'],input[type='search'],input[type='tel'],input[type='text'],input[type='url'],input[type='week'],input:not([type]),textarea,select{-webkit-appearance:none;background-color:transparent;border:0.1rem solid #d1d1d1;border-radius:.4rem;box-shadow:none;box-sizing:inherit;height:3.8rem;padding:.6rem 1.0rem .7rem;width:100%}input[type='color']:focus,input[type='date']:focus,input[type='datetime']:focus,input[type='datetime-local']:focus,input[type='email']:focus,input[type='month']:focus,input[type='number']:focus,input[type='password']:focus,input[type='search']:focus,input[type='tel']:focus,input[type='text']:focus,input[type='url']:focus,input[type='week']:focus,input:not([type]):focus,textarea:focus,select:focus{border-color:#0069d9;outline:0}select{background:url('data:image/svg+xml;utf8,') center right no-repeat;padding-right:3.0rem}select:focus{background-image:url('data:image/svg+xml;utf8,')}select[multiple]{background:none;height:auto}textarea{min-height:6.5rem}label,legend{display:block;font-size:1.6rem;font-weight:700;margin-bottom:.5rem}fieldset{border-width:0;padding:0}input[type='checkbox'],input[type='radio']{display:inline}.label-inline{display:inline-block;font-weight:normal;margin-left:.5rem}.container{margin:0 auto;max-width:112.0rem;padding:0 2.0rem;position:relative;width:100%}.row{display:flex;flex-direction:column;padding:0;width:100%}.row.row-no-padding{padding:0}.row.row-no-padding>.column{padding:0}.row.row-wrap{flex-wrap:wrap}.row.row-top{align-items:flex-start}.row.row-bottom{align-items:flex-end}.row.row-center{align-items:center}.row.row-stretch{align-items:stretch}.row.row-baseline{align-items:baseline}.row .column{display:block;flex:1 1 auto;margin-left:0;max-width:100%;width:100%}.row .column.column-offset-10{margin-left:10%}.row .column.column-offset-20{margin-left:20%}.row .column.column-offset-25{margin-left:25%}.row .column.column-offset-33,.row .column.column-offset-34{margin-left:33.3333%}.row .column.column-offset-40{margin-left:40%}.row .column.column-offset-50{margin-left:50%}.row .column.column-offset-60{margin-left:60%}.row .column.column-offset-66,.row .column.column-offset-67{margin-left:66.6666%}.row .column.column-offset-75{margin-left:75%}.row .column.column-offset-80{margin-left:80%}.row .column.column-offset-90{margin-left:90%}.row .column.column-10{flex:0 0 10%;max-width:10%}.row .column.column-20{flex:0 0 20%;max-width:20%}.row .column.column-25{flex:0 0 25%;max-width:25%}.row .column.column-33,.row .column.column-34{flex:0 0 33.3333%;max-width:33.3333%}.row .column.column-40{flex:0 0 40%;max-width:40%}.row .column.column-50{flex:0 0 50%;max-width:50%}.row .column.column-60{flex:0 0 60%;max-width:60%}.row .column.column-66,.row 
.column.column-67{flex:0 0 66.6666%;max-width:66.6666%}.row .column.column-75{flex:0 0 75%;max-width:75%}.row .column.column-80{flex:0 0 80%;max-width:80%}.row .column.column-90{flex:0 0 90%;max-width:90%}.row .column .column-top{align-self:flex-start}.row .column .column-bottom{align-self:flex-end}.row .column .column-center{align-self:center}@media (min-width: 40rem){.row{flex-direction:row;margin-left:-1.0rem;width:calc(100% + 2.0rem)}.row .column{margin-bottom:inherit;padding:0 1.0rem}}a{color:#0069d9;text-decoration:none}a:focus,a:hover{color:#606c76}dl,ol,ul{list-style:none;margin-top:0;padding-left:0}dl dl,dl ol,dl ul,ol dl,ol ol,ol ul,ul dl,ul ol,ul ul{font-size:90%;margin:1.5rem 0 1.5rem 3.0rem}ol{list-style:decimal inside}ul{list-style:circle inside}.button,button,dd,dt,li{margin-bottom:1.0rem}fieldset,input,select,textarea{margin-bottom:1.5rem}blockquote,dl,figure,form,ol,p,pre,table,ul{margin-bottom:2.5rem}table{border-spacing:0;display:block;overflow-x:auto;text-align:left;width:100%}td,th{border-bottom:0.1rem solid #e1e1e1;padding:1.2rem 1.5rem}td:first-child,th:first-child{padding-left:0}td:last-child,th:last-child{padding-right:0}@media (min-width: 40rem){table{display:table;overflow-x:initial}}b,strong{font-weight:bold}p{margin-top:0}h1,h2,h3,h4,h5,h6{font-weight:300;letter-spacing:-.1rem;margin-bottom:2.0rem;margin-top:0}h1{font-size:4.6rem;line-height:1.2}h2{font-size:3.6rem;line-height:1.25}h3{font-size:2.8rem;line-height:1.3}h4{font-size:2.2rem;letter-spacing:-.08rem;line-height:1.35}h5{font-size:1.8rem;letter-spacing:-.05rem;line-height:1.5}h6{font-size:1.6rem;letter-spacing:0;line-height:1.4}img{max-width:100%}.clearfix:after{clear:both;content:' ';display:table}.float-left{float:left}.float-right{float:right} + +/* General style */ +h1{font-size: 3.6rem; line-height: 1.25} +h2{font-size: 2.8rem; line-height: 1.3} +h3{font-size: 2.2rem; letter-spacing: -.08rem; line-height: 1.35} +h4{font-size: 1.8rem; letter-spacing: -.05rem; line-height: 1.5} +h5{font-size: 1.6rem; letter-spacing: 0; line-height: 1.4} +h6{font-size: 1.4rem; letter-spacing: 0; line-height: 1.2} +pre{padding: 1em;} + +.container{ + margin: 0 auto; + max-width: 80.0rem; + padding: 0 2.0rem; + position: relative; + width: 100% +} +select { + width: auto; +} + +/* Phoenix promo and logo */ +.phx-hero { + text-align: center; + border-bottom: 1px solid #e3e3e3; + background: #eee; + border-radius: 6px; + padding: 3em 3em 1em; + margin-bottom: 3rem; + font-weight: 200; + font-size: 120%; +} +.phx-hero input { + background: #ffffff; +} +.phx-logo { + min-width: 300px; + margin: 1rem; + display: block; +} +.phx-logo img { + width: auto; + display: block; +} + +/* Headers */ +header { + width: 100%; + background: #fdfdfd; + border-bottom: 1px solid #eaeaea; + margin-bottom: 2rem; +} +header section { + align-items: center; + display: flex; + flex-direction: column; + justify-content: space-between; +} +header section :first-child { + order: 2; +} +header section :last-child { + order: 1; +} +header nav ul, +header nav li { + margin: 0; + padding: 0; + display: block; + text-align: right; + white-space: nowrap; +} +header nav ul { + margin: 1rem; + margin-top: 0; +} +header nav a { + display: block; +} + +@media (min-width: 40.0rem) { /* Small devices (landscape phones, 576px and up) */ + header section { + flex-direction: row; + } + header nav ul { + margin: 1rem; + } + .phx-logo { + flex-basis: 527px; + margin: 2rem 1rem; + } +} diff --git 
a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/js/app.js b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/js/app.js new file mode 100644 index 0000000000..9eabcff9d3 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/js/app.js @@ -0,0 +1,44 @@ +// We import the CSS which is extracted to its own file by esbuild. +// Remove this line if you add a your own CSS build pipeline (e.g postcss). +import "../css/app.css" + +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. +// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "./vendor/some-package.js" +// +// Alternatively, you can `npm install some-package` and import +// them using a path starting with the package name: +// +// import "some-package" +// + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. +import {Socket} from "phoenix" +import {LiveSocket} from "phoenix_live_view" +import topbar from "../vendor/topbar" + +let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") +let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}}) + +// Show progress bar on live navigation and form submits +topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) +window.addEventListener("phx:page-loading-start", info => topbar.show()) +window.addEventListener("phx:page-loading-stop", info => topbar.hide()) + +// connect if there are any LiveViews on the page +liveSocket.connect() + +// expose liveSocket on window for web console debug logs and latency simulation: +// >> liveSocket.enableDebug() +// >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session +// >> liveSocket.disableLatencySim() +window.liveSocket = liveSocket diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/vendor/topbar.js b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/vendor/topbar.js new file mode 100644 index 0000000000..ff7fbb6709 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/assets/vendor/topbar.js @@ -0,0 +1,157 @@ +/** + * @license MIT + * topbar 1.0.0, 2021-01-06 + * http://buunguyen.github.io/topbar + * Copyright (c) 2021 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + // https://gist.github.com/paulirish/1579671 + (function () { + var lastTime = 0; + var vendors = ["ms", "moz", "webkit", "o"]; + for (var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { + window.requestAnimationFrame = + window[vendors[x] + "RequestAnimationFrame"]; + window.cancelAnimationFrame = + window[vendors[x] + "CancelAnimationFrame"] || + window[vendors[x] + "CancelRequestAnimationFrame"]; + } + if (!window.requestAnimationFrame) + window.requestAnimationFrame = function (callback, element) { + var currTime = new Date().getTime(); + var timeToCall = Math.max(0, 16 - (currTime - lastTime)); + var id = window.setTimeout(function () { + callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + if (!window.cancelAnimationFrame) + window.cancelAnimationFrame = function (id) { + clearTimeout(id); + }; + })(); + + var canvas, + progressTimerId, + fadeTimerId, + 
currentProgress, + showing, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": "rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + shadowBlur: 10, + shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + document.body.appendChild(canvas); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function () { + if (showing) return; + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 
1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/config.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/config.exs new file mode 100644 index 0000000000..4a1915fd83 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/config.exs @@ -0,0 +1,51 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. +# +# This configuration file is loaded before any dependency and +# is restricted to this project. + +# General application configuration +import Config + +config :hello_elixir, + ecto_repos: [HelloElixir.Repo] + +# Configures the endpoint +config :hello_elixir, HelloElixirWeb.Endpoint, + url: [host: "localhost"], + render_errors: [view: HelloElixirWeb.ErrorView, accepts: ~w(html json), layout: false], + pubsub_server: HelloElixir.PubSub, + live_view: [signing_salt: "O1MdfPrK"] + +# Configures the mailer +# +# By default it uses the "Local" adapter which stores the emails +# locally. You can see the emails in your browser, at "/dev/mailbox". +# +# For production it's recommended to configure a different adapter +# at the `config/runtime.exs`. +config :hello_elixir, HelloElixir.Mailer, adapter: Swoosh.Adapters.Local + +# Swoosh API client is needed for adapters other than SMTP. +config :swoosh, :api_client, false + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.12.18", + default: [ + args: ~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + +# Configures Elixir's Logger +config :logger, :console, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. +import_config "#{config_env()}.exs" diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/dev.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/dev.exs new file mode 100644 index 0000000000..0b4dcc3ff4 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/dev.exs @@ -0,0 +1,71 @@ +import Config + +# Configure your database +config :hello_elixir, HelloElixir.Repo, + database: Path.expand("../hello_elixir_dev.db", Path.dirname(__ENV__.file)), + pool_size: 5, + show_sensitive_data_on_connection_error: true + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. 
For example, we use it +# with esbuild to bundle .js and .css sources. +config :hello_elixir, HelloElixirWeb.Endpoint, + # Binding to loopback ipv4 address prevents access from other machines. + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. + http: [ip: {0, 0, 0, 0, 0, 0, 0, 0}, port: 4000], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: "04klpwfSfXhaJdwtinv6ScP9dT78hgU+8NxRzgjDi52celjU3UtqeVy9Sv057XH6", + watchers: [ + # Start the esbuild watcher by calling Esbuild.install_and_run(:default, args) + esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]} + ] + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Note that this task requires Erlang/OTP 20 or later. +# Run `mix help phx.gen.cert` for more information. +# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Watch static and templates for browser reloading. +config :hello_elixir, HelloElixirWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"priv/gettext/.*(po)$", + ~r"lib/hello_elixir_web/(live|views)/.*(ex)$", + ~r"lib/hello_elixir_web/templates/.*(eex)$" + ] + ] + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. +config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/prod.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/prod.exs new file mode 100644 index 0000000000..bbe0c46f5a --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/prod.exs @@ -0,0 +1,51 @@ +import Config + +# For production, don't forget to configure the url host +# to something meaningful, Phoenix uses this information +# when generating URLs. +# +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix phx.digest` task, +# which you should run after static files are built and +# before starting your production server. +config :hello_elixir, HelloElixirWeb.Endpoint, + url: [host: "example.com", port: 80], + cache_static_manifest: "priv/static/cache_manifest.json" + +# Do not print debug messages in production +config :logger, level: :info + +# ## SSL Support +# +# To get SSL working, you will need to add the `https` key +# to the previous section and set your `:url` port to 443: +# +# config :hello_elixir, HelloElixirWeb.Endpoint, +# ..., +# url: [host: "example.com", port: 443], +# https: [ +# ..., +# port: 443, +# cipher_suite: :strong, +# keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), +# certfile: System.get_env("SOME_APP_SSL_CERT_PATH") +# ] +# +# The `cipher_suite` is set to `:strong` to support only the +# latest and more secure SSL ciphers. This means old browsers +# and clients may not be supported. 
You can set it to +# `:compatible` for wider support. +# +# `:keyfile` and `:certfile` expect an absolute path to the key +# and cert in disk or a relative path inside priv, for example +# "priv/ssl/server.key". For all supported SSL configuration +# options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 +# +# We also recommend setting `force_ssl` in your endpoint, ensuring +# no data is ever sent via http, always redirecting to https: +# +# config :hello_elixir, HelloElixirWeb.Endpoint, +# force_ssl: [hsts: true] +# +# Check `Plug.SSL` for all available options in `force_ssl`. diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/runtime.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/runtime.exs new file mode 100644 index 0000000000..c3f2bd1924 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/runtime.exs @@ -0,0 +1,85 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. +if config_env() == :prod do + database_path = + System.get_env("DATABASE_PATH") || + raise """ + environment variable DATABASE_PATH is missing. + For example: /etc/hello_elixir/hello_elixir.db + """ + + config :hello_elixir, HelloElixir.Repo, + database: database_path, + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "5") + + # The secret key base is used to sign/encrypt cookies and other secrets. + # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. + secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + app_name = + System.get_env("FLY_APP_NAME") || + raise "FLY_APP_NAME not available" + host = "#{app_name}.fly.dev" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :hello_elixir, HelloElixirWeb.Endpoint, + url: [host: "#{app_name}.fly.dev", port: port], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0}, + port: String.to_integer(System.get_env("PORT") || "4000") + ], + secret_key_base: secret_key_base + + # ## Using releases + # + # If you are doing OTP releases, you need to instruct Phoenix + # to start each relevant endpoint: + # + config :hello_elixir, HelloElixirWeb.Endpoint, server: true + + # ## Configuring the mailer + # + # In production you need to configure the mailer to use a different adapter. + # Also, you may need to configure the Swoosh API client of your choice if you + # are not using SMTP. 
Here is an example of the configuration: + # + # config :hello_elixir, HelloElixir.Mailer, + # adapter: Swoosh.Adapters.Mailgun, + # api_key: System.get_env("MAILGUN_API_KEY"), + # domain: System.get_env("MAILGUN_DOMAIN") + # + # For this example you need include a HTTP client required by Swoosh API client. + # Swoosh supports Hackney and Finch out of the box: + # + # config :swoosh, :api_client, Swoosh.ApiClient.Hackney + # + # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details. +end + +if config_env() == :dev do + database_url = System.get_env("DATABASE_URL") + + if database_url != nil do + config :hello_elixir, HelloElixir.Repo, + url: database_url, + socket_options: [:inet6] + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/test.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/test.exs new file mode 100644 index 0000000000..edc3766467 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/config/test.exs @@ -0,0 +1,30 @@ +import Config + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :hello_elixir, HelloElixir.Repo, + username: "postgres", + password: "postgres", + database: "hello_elixir_test#{System.get_env("MIX_TEST_PARTITION")}", + hostname: "localhost", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 10 + +# We don't run a server during test. If one is required, +# you can enable the server option below. +config :hello_elixir, HelloElixirWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "ZSboDtvMYxxeMHqCOlTGvFkIAXNXUONkHC3mt3CE+34iYHClSKlJ1FMxA3/oK8lG", + server: false + +# In test we don't send emails. +config :hello_elixir, HelloElixir.Mailer, adapter: Swoosh.Adapters.Test + +# Print only warnings and errors during test +config :logger, level: :warn + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh new file mode 100755 index 0000000000..d4def0a0b8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ! 
-f /data/prod.db ]; then + echo "Creating database file" + sqlite3 /data/prod.db +fi + +/app/entry eval HelloElixir.Release.migrate && \ + /app/entry start \ No newline at end of file diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/fly.toml b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/fly.toml new file mode 100644 index 0000000000..59c9ae6da5 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/fly.toml @@ -0,0 +1,55 @@ + +kill_signal = 'SIGTERM' +kill_timeout = '5s' + +[experimental] + auto_rollback = true + +[build] + +[env] + DATABASE_PATH = '/mnt/name/name.db' + PHX_HOST = 'deploy-phoenix-sqlite-custom-tool-versions.fly.dev' + PORT = '8080' + SECRET_KEY_BASE = '/28BVC30oMsrUtq0VMBmfxF7zQhjEELRUoNtJOvyEOj7P5YbB7FN6S47KkWyQNcv' + +[[mounts]] + source = 'name' + destination = '/mnt/name' + +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = 'stop' + auto_start_machines = true + min_machines_running = 0 + processes = ['app'] + + [http_service.concurrency] + type = 'connections' + hard_limit = 1000 + soft_limit = 1000 + +[[services]] + protocol = 'tcp' + internal_port = 8080 + processes = ['app'] + + [[services.ports]] + port = 80 + handlers = ['http'] + + [[services.ports]] + port = 443 + handlers = ['tls', 'http'] + + [services.concurrency] + type = 'connections' + hard_limit = 25 + soft_limit = 20 + +[[vm]] + memory = '1gb' + cpu_kind = 'shared' + cpus = 1 + memory_mb = 1024 diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir.ex new file mode 100644 index 0000000000..b45535df51 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir.ex @@ -0,0 +1,9 @@ +defmodule HelloElixir do + @moduledoc """ + HelloElixir keeps the contexts that define your domain + and business logic. + + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/application.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/application.ex new file mode 100644 index 0000000000..e98c8a7e89 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/application.ex @@ -0,0 +1,36 @@ +defmodule HelloElixir.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + children = [ + # Start the Ecto repository + HelloElixir.Repo, + # Start the Telemetry supervisor + HelloElixirWeb.Telemetry, + # Start the PubSub system + {Phoenix.PubSub, name: HelloElixir.PubSub}, + # Start the Endpoint (http/https) + HelloElixirWeb.Endpoint + # Start a worker by calling: HelloElixir.Worker.start_link(arg) + # {HelloElixir.Worker, arg} + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: HelloElixir.Supervisor] + Supervisor.start_link(children, opts) + end + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. 
+ @impl true + def config_change(changed, _new, removed) do + HelloElixirWeb.Endpoint.config_change(changed, removed) + :ok + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/mailer.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/mailer.ex new file mode 100644 index 0000000000..12967a7ed3 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/mailer.ex @@ -0,0 +1,3 @@ +defmodule HelloElixir.Mailer do + use Swoosh.Mailer, otp_app: :hello_elixir +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/release.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/release.ex new file mode 100644 index 0000000000..9208e1cc87 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/release.ex @@ -0,0 +1,28 @@ +defmodule HelloElixir.Release do + @moduledoc """ + Used for executing DB release tasks when run in production without Mix + installed. + """ + @app :hello_elixir + + def migrate do + load_app() + + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + load_app() + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.fetch_env!(@app, :ecto_repos) + end + + defp load_app do + Application.load(@app) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/repo.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/repo.ex new file mode 100644 index 0000000000..8d5e8e7da5 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir/repo.ex @@ -0,0 +1,5 @@ +defmodule HelloElixir.Repo do + use Ecto.Repo, + otp_app: :hello_elixir, + adapter: Ecto.Adapters.SQLite3 +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web.ex new file mode 100644 index 0000000000..f7a1ca26ff --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web.ex @@ -0,0 +1,102 @@ +defmodule HelloElixirWeb do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, views, channels and so on. + + This can be used in your application as: + + use HelloElixirWeb, :controller + use HelloElixirWeb, :view + + The definitions below will be executed for every view, + controller, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. Instead, define any helper function in modules + and import those modules here. 
+ """ + + def controller do + quote do + use Phoenix.Controller, namespace: HelloElixirWeb + + import Plug.Conn + import HelloElixirWeb.Gettext + alias HelloElixirWeb.Router.Helpers, as: Routes + end + end + + def view do + quote do + use Phoenix.View, + root: "lib/hello_elixir_web/templates", + namespace: HelloElixirWeb + + # Import convenience functions from controllers + import Phoenix.Controller, + only: [get_flash: 1, get_flash: 2, view_module: 1, view_template: 1] + + # Include shared imports and aliases for views + unquote(view_helpers()) + end + end + + def live_view do + quote do + use Phoenix.LiveView, + layout: {HelloElixirWeb.LayoutView, "live.html"} + + unquote(view_helpers()) + end + end + + def live_component do + quote do + use Phoenix.LiveComponent + + unquote(view_helpers()) + end + end + + def router do + quote do + use Phoenix.Router + + import Plug.Conn + import Phoenix.Controller + import Phoenix.LiveView.Router + end + end + + def channel do + quote do + use Phoenix.Channel + import HelloElixirWeb.Gettext + end + end + + defp view_helpers do + quote do + # Use all HTML functionality (forms, tags, etc) + use Phoenix.HTML + + # Import LiveView and .heex helpers (live_render, live_patch, <.form>, etc) + import Phoenix.LiveView.Helpers + + # Import basic rendering functionality (render, render_layout, etc) + import Phoenix.View + + import HelloElixirWeb.ErrorHelpers + import HelloElixirWeb.Gettext + alias HelloElixirWeb.Router.Helpers, as: Routes + end + end + + @doc """ + When used, dispatch to the appropriate controller/view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/controllers/page_controller.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/controllers/page_controller.ex new file mode 100644 index 0000000000..a989f86306 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/controllers/page_controller.ex @@ -0,0 +1,7 @@ +defmodule HelloElixirWeb.PageController do + use HelloElixirWeb, :controller + + def index(conn, _params) do + render(conn, "index.html") + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/endpoint.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/endpoint.ex new file mode 100644 index 0000000000..d2fba4562e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/endpoint.ex @@ -0,0 +1,50 @@ +defmodule HelloElixirWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :hello_elixir + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_hello_elixir_key", + signing_salt: "eCMCnkFM" + ] + + socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # You should set gzip to true if you are running phx.digest + # when deploying your static files in production. + plug Plug.Static, + at: "/", + from: :hello_elixir, + gzip: false, + only: ~w(assets fonts images favicon.ico robots.txt) + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? 
do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + plug Phoenix.Ecto.CheckRepoStatus, otp_app: :hello_elixir + end + + plug Phoenix.LiveDashboard.RequestLogger, + param_key: "request_logger", + cookie_key: "request_logger" + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug HelloElixirWeb.Router +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/gettext.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/gettext.ex new file mode 100644 index 0000000000..9c7b951acf --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/gettext.ex @@ -0,0 +1,24 @@ +defmodule HelloElixirWeb.Gettext do + @moduledoc """ + A module providing Internationalization with a gettext-based API. + + By using [Gettext](https://hexdocs.pm/gettext), + your module gains a set of macros for translations, for example: + + import HelloElixirWeb.Gettext + + # Simple translation + gettext("Here is the string to translate") + + # Plural translation + ngettext("Here is the string to translate", + "Here are the strings to translate", + 3) + + # Domain-based translation + dgettext("errors", "Here is the error message to translate") + + See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. + """ + use Gettext, otp_app: :hello_elixir +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/router.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/router.ex new file mode 100644 index 0000000000..7d36000513 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/router.ex @@ -0,0 +1,55 @@ +defmodule HelloElixirWeb.Router do + use HelloElixirWeb, :router + + pipeline :browser do + plug :accepts, ["html"] + plug :fetch_session + plug :fetch_live_flash + plug :put_root_layout, {HelloElixirWeb.LayoutView, :root} + plug :protect_from_forgery + plug :put_secure_browser_headers + end + + pipeline :api do + plug :accepts, ["json"] + end + + scope "/", HelloElixirWeb do + pipe_through :browser + + get "/", PageController, :index + end + + # Other scopes may use custom stacks. + # scope "/api", HelloElixirWeb do + # pipe_through :api + # end + + # Enables LiveDashboard only for development + # + # If you want to use the LiveDashboard in production, you should put + # it behind authentication and allow only admins to access it. + # If your application does not have an admins-only section yet, + # you can use Plug.BasicAuth to set up some basic authentication + # as long as you are also using SSL (which you should anyway). + if Mix.env() in [:dev, :test] do + import Phoenix.LiveDashboard.Router + + scope "/" do + pipe_through :browser + live_dashboard "/dashboard", metrics: HelloElixirWeb.Telemetry + end + end + + # Enables the Swoosh mailbox preview in development. + # + # Note that preview only shows emails that were sent by the same + # node running the Phoenix server. 
+ if Mix.env() == :dev do + scope "/dev" do + pipe_through :browser + + forward "/mailbox", Plug.Swoosh.MailboxPreview + end + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/telemetry.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/telemetry.ex new file mode 100644 index 0000000000..55a252c0df --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/telemetry.ex @@ -0,0 +1,71 @@ +defmodule HelloElixirWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. + # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + + # Database Metrics + summary("hello_elixir.repo.query.total_time", + unit: {:native, :millisecond}, + description: "The sum of the other measurements" + ), + summary("hello_elixir.repo.query.decode_time", + unit: {:native, :millisecond}, + description: "The time spent decoding the data received from the database" + ), + summary("hello_elixir.repo.query.query_time", + unit: {:native, :millisecond}, + description: "The time spent executing the query" + ), + summary("hello_elixir.repo.query.queue_time", + unit: {:native, :millisecond}, + description: "The time spent waiting for a database connection" + ), + summary("hello_elixir.repo.query.idle_time", + unit: {:native, :millisecond}, + description: + "The time the connection spent waiting before being checked out for the query" + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. + # {HelloElixirWeb, :count_users, []} + ] + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/app.html.heex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/app.html.heex new file mode 100644 index 0000000000..169aed9569 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/app.html.heex @@ -0,0 +1,5 @@ +
+<main class="container">
+  <p class="alert alert-info" role="alert"><%= get_flash(@conn, :info) %></p>
+  <p class="alert alert-danger" role="alert"><%= get_flash(@conn, :error) %></p>
+  <%= @inner_content %>
+</main>
diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/live.html.heex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/live.html.heex new file mode 100644 index 0000000000..a29d604480 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/live.html.heex @@ -0,0 +1,11 @@ +
+<main class="container">
+  <p class="alert alert-info" role="alert"
+    phx-click="lv:clear-flash"
+    phx-value-key="info"><%= live_flash(@flash, :info) %></p>
+
+  <p class="alert alert-danger" role="alert"
+    phx-click="lv:clear-flash"
+    phx-value-key="error"><%= live_flash(@flash, :error) %></p>
+
+  <%= @inner_content %>
+</main>
diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/root.html.heex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/root.html.heex new file mode 100644 index 0000000000..9fd12947d8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/layout/root.html.heex @@ -0,0 +1,30 @@ + + + + + + + <%= csrf_meta_tag() %> + <%= live_title_tag assigns[:page_title] || "HelloElixir", suffix: " · Phoenix Framework" %> + + + + +
+
+ + +
+
+ <%= @inner_content %> + + diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/page/index.html.heex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/page/index.html.heex new file mode 100644 index 0000000000..f844bd8d7a --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/templates/page/index.html.heex @@ -0,0 +1,41 @@ +
+

<%= gettext "Welcome to %{name}!", name: "Phoenix" %>

+

Peace of mind from prototype to production

+
+ +
+ + +
diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_helpers.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_helpers.ex new file mode 100644 index 0000000000..00f2e5e770 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_helpers.ex @@ -0,0 +1,47 @@ +defmodule HelloElixirWeb.ErrorHelpers do + @moduledoc """ + Conveniences for translating and building error messages. + """ + + use Phoenix.HTML + + @doc """ + Generates tag for inlined form input errors. + """ + def error_tag(form, field) do + Enum.map(Keyword.get_values(form.errors, field), fn error -> + content_tag(:span, translate_error(error), + class: "invalid-feedback", + phx_feedback_for: input_name(form, field) + ) + end) + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate "is invalid" in the "errors" domain + # dgettext("errors", "is invalid") + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # Because the error messages we show in our forms and APIs + # are defined inside Ecto, we need to translate them dynamically. + # This requires us to call the Gettext module passing our gettext + # backend as first argument. + # + # Note we use the "errors" domain, which means translations + # should be written to the errors.po file. The :count option is + # set by Ecto and indicates we should also apply plural rules. + if count = opts[:count] do + Gettext.dngettext(HelloElixirWeb.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(HelloElixirWeb.Gettext, "errors", msg, opts) + end + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_view.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_view.ex new file mode 100644 index 0000000000..2641b6f961 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/error_view.ex @@ -0,0 +1,16 @@ +defmodule HelloElixirWeb.ErrorView do + use HelloElixirWeb, :view + + # If you want to customize a particular status code + # for a certain format, you may uncomment below. + # def render("500.html", _assigns) do + # "Internal Server Error" + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.html" becomes + # "Not Found". + def template_not_found(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/layout_view.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/layout_view.ex new file mode 100644 index 0000000000..afa45f3d0f --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/layout_view.ex @@ -0,0 +1,7 @@ +defmodule HelloElixirWeb.LayoutView do + use HelloElixirWeb, :view + + # Phoenix LiveDashboard is available only in development by default, + # so we instruct Elixir to not warn if the dashboard route is missing. 
+ @compile {:no_warn_undefined, {Routes, :live_dashboard_path, 2}} +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/page_view.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/page_view.ex new file mode 100644 index 0000000000..76043f1154 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/lib/hello_elixir_web/views/page_view.ex @@ -0,0 +1,3 @@ +defmodule HelloElixirWeb.PageView do + use HelloElixirWeb, :view +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.exs new file mode 100644 index 0000000000..e7602f8892 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.exs @@ -0,0 +1,70 @@ +defmodule HelloElixir.MixProject do + use Mix.Project + + def project do + [ + app: :hello_elixir, + version: "0.1.0", + elixir: "~> 1.12", + elixirc_paths: elixirc_paths(Mix.env()), + compilers: [:gettext] ++ Mix.compilers(), + start_permanent: Mix.env() == :prod, + aliases: aliases(), + deps: deps() + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {HelloElixir.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. + defp deps do + [ + {:phoenix, "~> 1.6.11"}, + {:phoenix_ecto, "~> 4.4"}, + {:ecto_sql, "~> 3.6"}, + {:ecto_sqlite3, ">= 0.0.0"}, + {:phoenix_html, "~> 3.0"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 0.16.0"}, + {:floki, ">= 0.30.0", only: :test}, + {:phoenix_live_dashboard, "~> 0.5"}, + {:esbuild, "~> 0.2", runtime: Mix.env() == :dev}, + {:swoosh, "~> 1.3"}, + {:telemetry_metrics, "~> 0.6"}, + {:telemetry_poller, "~> 1.0"}, + {:gettext, "~> 0.18"}, + {:jason, "~> 1.2"}, + {:plug_cowboy, "~> 2.5"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. 
+ defp aliases do + [ + setup: ["deps.get", "ecto.setup"], + "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], + "ecto.reset": ["ecto.drop", "ecto.setup"], + test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"], + "assets.deploy": ["esbuild default --minify", "phx.digest"] + ] + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.lock b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.lock new file mode 100644 index 0000000000..dac6ff8500 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/mix.lock @@ -0,0 +1,38 @@ +%{ + "castore": {:hex, :castore, "0.1.13", "ccf3ab251ffaebc4319f41d788ce59a6ab3f42b6c27e598ad838ffecee0b04f9", [:mix], [], "hexpm", "a14a7eecfec7e20385493dbb92b0d12c5d77ecfd6307de10102d58c94e8c49c0"}, + "connection": {:hex, :connection, "1.1.0", "ff2a49c4b75b6fb3e674bfc5536451607270aac754ffd1bdfe175abe4a6d7a68", [:mix], [], "hexpm", "722c1eb0a418fbe91ba7bd59a47e28008a189d47e37e0e7bb85585a016b2869c"}, + "cowboy": {:hex, :cowboy, "2.9.0", "865dd8b6607e14cf03282e10e934023a1bd8be6f6bacf921a7e2a96d800cd452", [:make, :rebar3], [{:cowlib, "2.11.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "1.8.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "2c729f934b4e1aa149aff882f57c6372c15399a20d54f65c8d67bef583021bde"}, + "cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"}, + "cowlib": {:hex, :cowlib, "2.11.0", "0b9ff9c346629256c42ebe1eeb769a83c6cb771a6ee5960bd110ab0b9b872063", [:make, :rebar3], [], "hexpm", "2b3e9da0b21c4565751a6d4901c20d1b4cc25cbb7fd50d91d2ab6dd287bc86a9"}, + "db_connection": {:hex, :db_connection, "2.4.1", "6411f6e23f1a8b68a82fa3a36366d4881f21f47fc79a9efb8c615e62050219da", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ea36d226ec5999781a9a8ad64e5d8c4454ecedc7a4d643e4832bf08efca01f00"}, + "decimal": {:hex, :decimal, "2.0.0", "a78296e617b0f5dd4c6caf57c714431347912ffb1d0842e998e9792b5642d697", [:mix], [], "hexpm", "34666e9c55dea81013e77d9d87370fe6cb6291d1ef32f46a1600230b1d44f577"}, + "ecto": {:hex, :ecto, "3.7.1", "a20598862351b29f80f285b21ec5297da1181c0442687f9b8329f0445d228892", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "d36e5b39fc479e654cffd4dbe1865d9716e4a9b6311faff799b6f90ab81b8638"}, + "ecto_sql": {:hex, :ecto_sql, "3.7.1", "8de624ef50b2a8540252d8c60506379fbbc2707be1606853df371cf53df5d053", [:mix], [{:db_connection, "~> 2.2", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.7.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.4.0 or ~> 0.5.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.15.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", 
"2b42a32e2ce92f64aba5c88617891ab3b0ba34f3f3a503fa20009eae1a401c81"}, + "ecto_sqlite3": {:hex, :ecto_sqlite3, "0.7.2", "667338c1e0f7af13f75ab9eec13afcea216eb71dac9daf7897c8f0acc8b5722b", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.7", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.7", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:exqlite, "~> 0.6", [hex: :exqlite, repo: "hexpm", optional: false]}], "hexpm", "b003804132f183d1d6dc759f6c2ccc60c1fb5d62e1db4aa4fe0d38577096f7c4"}, + "elixir_make": {:hex, :elixir_make, "0.6.3", "bc07d53221216838d79e03a8019d0839786703129599e9619f4ab74c8c096eac", [:mix], [], "hexpm", "f5cbd651c5678bcaabdbb7857658ee106b12509cd976c2c2fca99688e1daf716"}, + "esbuild": {:hex, :esbuild, "0.3.4", "416203c642eb84b207f882cf7953a1fd7bb71e23f5f86554f983bb7bad18b897", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c472e38b37e9547113776b1e4b64b44ec540bcc7056dd252c2c3ffba41aa9793"}, + "exqlite": {:hex, :exqlite, "0.8.5", "f4b38c56019d9582de800c7a4057dc228c60eff51212fd17dea60152ab1fb95a", [:make, :mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.6", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "a4a0a34b4f1306852fb951a86d44a5ee48a251c464de8e968e8e2b3fb982fe3c"}, + "file_system": {:hex, :file_system, "0.2.10", "fb082005a9cd1711c05b5248710f8826b02d7d1784e7c3451f9c1231d4fc162d", [:mix], [], "hexpm", "41195edbfb562a593726eda3b3e8b103a309b733ad25f3d642ba49696bf715dc"}, + "floki": {:hex, :floki, "0.32.0", "f915dc15258bc997d49be1f5ef7d3992f8834d6f5695270acad17b41f5bcc8e2", [:mix], [{:html_entities, "~> 0.5.0", [hex: :html_entities, repo: "hexpm", optional: false]}], "hexpm", "1c5a91cae1fd8931c26a4826b5e2372c284813904c8bacb468b5de39c7ececbd"}, + "gettext": {:hex, :gettext, "0.18.2", "7df3ea191bb56c0309c00a783334b288d08a879f53a7014341284635850a6e55", [:mix], [], "hexpm", "f9f537b13d4fdd30f3039d33cb80144c3aa1f8d9698e47d7bcbcc8df93b1f5c5"}, + "html_entities": {:hex, :html_entities, "0.5.2", "9e47e70598da7de2a9ff6af8758399251db6dbb7eebe2b013f2bbd2515895c3c", [:mix], [], "hexpm", "c53ba390403485615623b9531e97696f076ed415e8d8058b1dbaa28181f4fdcc"}, + "jason": {:hex, :jason, "1.3.0", "fa6b82a934feb176263ad2df0dbd91bf633d4a46ebfdffea0c8ae82953714946", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "53fc1f51255390e0ec7e50f9cb41e751c260d065dcba2bf0d08dc51a4002c2ac"}, + "mime": {:hex, :mime, "2.0.2", "0b9e1a4c840eafb68d820b0e2158ef5c49385d17fb36855ac6e7e087d4b1dcc5", [:mix], [], "hexpm", "e6a3f76b4c277739e36c2e21a2c640778ba4c3846189d5ab19f97f126df5f9b7"}, + "phoenix": {:hex, :phoenix, "1.6.11", "29f3c0fd12fa1fc4d4b05e341578e55bc78d96ea83a022587a7e276884d397e4", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.0", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 1.0", [hex: :phoenix_view, repo: "hexpm", optional: false]}, {:plug, "~> 1.10", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.2", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "1664e34f80c25ea4918fbadd957f491225ef601c0e00b4e644b1a772864bfbc2"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.4.0", 
"0672ed4e4808b3fbed494dded89958e22fb882de47a97634c0b13e7b0b5f7720", [:mix], [{:ecto, "~> 3.3", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "09864e558ed31ee00bd48fcc1d4fc58ae9678c9e81649075431e69dbabb43cc1"}, + "phoenix_html": {:hex, :phoenix_html, "3.2.0", "1c1219d4b6cb22ac72f12f73dc5fad6c7563104d083f711c3fcd8551a1f4ae11", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "36ec97ba56d25c0136ef1992c37957e4246b649d620958a1f9fa86165f8bc54f"}, + "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.5.3", "ff153c46aee237dd7244f07e9b98d557fe0d1de7a5916438e634c3be2d13c607", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:phoenix_live_view, "~> 0.16.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "e36e62b1f61c19b645853af78290a5e7900f7cae1e676714ff69f9836e2f2e76"}, + "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.3.3", "3a53772a6118d5679bf50fc1670505a290e32a1d195df9e069d8c53ab040c054", [:mix], [{:file_system, "~> 0.2.1 or ~> 0.3", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "766796676e5f558dbae5d1bdb066849673e956005e3730dfd5affd7a6da4abac"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "0.16.4", "5692edd0bac247a9a816eee7394e32e7a764959c7d0cf9190662fc8b0cd24c97", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.5.9 or ~> 1.6.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "754ba49aa2e8601afd4f151492c93eb72df69b0b9856bab17711b8397e43bba0"}, + "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.1", "ba04e489ef03763bf28a17eb2eaddc2c20c6d217e2150a61e3298b0f4c2012b5", [:mix], [], "hexpm", "81367c6d1eea5878ad726be80808eb5a787a23dee699f96e72b1109c57cdd8d9"}, + "phoenix_view": {:hex, :phoenix_view, "1.1.2", "1b82764a065fb41051637872c7bd07ed2fdb6f5c3bd89684d4dca6e10115c95a", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "7ae90ad27b09091266f6adbb61e1d2516a7c3d7062c6789d46a7554ec40f3a56"}, + "plug": {:hex, :plug, "1.13.6", "187beb6b67c6cec50503e940f0434ea4692b19384d47e5fdfd701e93cadb4cc2", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "02b9c6b9955bce92c829f31d6284bf53c591ca63c4fb9ff81dfd0418667a34ff"}, + "plug_cowboy": {:hex, :plug_cowboy, "2.5.2", "62894ccd601cf9597e2c23911ff12798a8a18d237e9739f58a6b04e4988899fe", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "ea6e87f774c8608d60c8d34022a7d073bd7680a0a013f049fc62bf35efea1044"}, + "plug_crypto": {:hex, 
:plug_crypto, "1.2.2", "05654514ac717ff3a1843204b424477d9e60c143406aa94daf2274fdd280794d", [:mix], [], "hexpm", "87631c7ad914a5a445f0a3809f99b079113ae4ed4b867348dd9eec288cecb6db"}, + "postgrex": {:hex, :postgrex, "0.15.13", "7794e697481799aee8982688c261901de493eb64451feee6ea58207d7266d54a", [:mix], [{:connection, "~> 1.0", [hex: :connection, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}], "hexpm", "3ffb76e1a97cfefe5c6a95632a27ffb67f28871c9741fb585f9d1c3cd2af70f1"}, + "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"}, + "swoosh": {:hex, :swoosh, "1.5.1", "ec1b3fa6a092597ac02444c36c6e3c2bc90c89c02e4e0cb262725d07d610c989", [:mix], [{:cowboy, "~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:finch, "~> 0.6", [hex: :finch, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13 or ~> 1.0", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "867395cbf0d764b24b6bf3c375137b98432cedb8c7f91ef9bd1c379cf626ac42"}, + "telemetry": {:hex, :telemetry, "1.1.0", "a589817034a27eab11144ad24d5c0f9fab1f58173274b1e9bae7074af9cbee51", [:rebar3], [], "hexpm", "b727b2a1f75614774cff2d7565b64d0dfa5bd52ba517f16543e6fc7efcc0df48"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "0.6.1", "315d9163a1d4660aedc3fee73f33f1d355dcc76c5c3ab3d59e76e3edf80eef1f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7be9e0871c41732c233be71e4be11b96e56177bf15dde64a8ac9ce72ac9834c6"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.0.0", "db91bb424e07f2bb6e73926fcafbfcbcb295f0193e0a00e825e589a0a47e8453", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "b3a24eafd66c3f42da30fc3ca7dda1e9d546c12250a2d60d7b81d264fbec4f6e"}, +} diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/en/LC_MESSAGES/errors.po b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/en/LC_MESSAGES/errors.po new file mode 100644 index 0000000000..844c4f5cea --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/en/LC_MESSAGES/errors.po @@ -0,0 +1,112 @@ +## `msgid`s in this file come from POT (.pot) files. +## +## Do not add, change, or remove `msgid`s manually here as +## they're tied to the ones in the corresponding POT file +## (with the same domain). +## +## Use `mix gettext.extract --merge` or `mix gettext.merge` +## to merge POT files into PO files. 
+msgid "" +msgstr "" +"Language: en\n" + +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/errors.pot b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/errors.pot new file mode 100644 index 0000000000..39a220be35 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/gettext/errors.pot @@ -0,0 +1,95 @@ +## This is a PO Template file. +## +## `msgid`s here are often extracted from source code. +## Add new translations manually only if they're dynamic +## translations that can't be statically extracted. +## +## Run `mix gettext.extract` to bring this file up to +## date. Leave `msgstr`s empty as changing them here has no +## effect: edit them in PO (`.po`) files instead. 
+ +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/.formatter.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/.formatter.exs new file mode 100644 index 0000000000..49f9151ed2 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto_sql], + inputs: ["*.exs"] +] diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/20210505214438_create_a_migration_to_run.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/20210505214438_create_a_migration_to_run.exs new file mode 100644 index 0000000000..f54f5a50aa --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/migrations/20210505214438_create_a_migration_to_run.exs @@ -0,0 +1,9 @@ +defmodule HelloElixir.Repo.Migrations.CreateAMigrationToRun do + use Ecto.Migration + + def change do + create table(:testing) do + add :name, :string + end + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/seeds.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/seeds.exs new file mode 100644 index 0000000000..d86886fce4 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/repo/seeds.exs @@ -0,0 +1,11 @@ +# Script for populating the database. 
You can run it as: +# +# mix run priv/repo/seeds.exs +# +# Inside the script, you can read and write to any of your +# repositories directly: +# +# HelloElixir.Repo.insert!(%HelloElixir.SomeSchema{}) +# +# We recommend using the bang functions (`insert!`, `update!` +# and so on) as they will fail if something goes wrong. diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/favicon.ico b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..73de524aaadcf60fbe9d32881db0aa86b58b5cb9 GIT binary patch literal 1258 zcmbtUO>fgM7{=qN=;Mz_82;lvPEdVaxv-<-&=sZLwab?3I zBP>U*&(Hv<5n@9ZQ$vhg#|u$Zmtq8BV;+W*7(?jOx-{r?#TE&$Sdq77MbdJjD5`-q zMm_z(jLv3t>5NhzK{%aG(Yudfpjd3AFdKe2U7&zdepTe>^s(@!&0X8TJ`h+-I?84Ml# literal 0 HcmV?d00001 diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/images/phoenix.png b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/images/phoenix.png new file mode 100644 index 0000000000000000000000000000000000000000..9c81075f63d2151e6f40e9aa66f665749a87cc6a GIT binary patch literal 13900 zcmaL8WmsF?7A@RTTCBLc6?b=ccXxso4H~R1?gT4RtT+@6?yiLril%4@T7niU{_*z6 z{eIkY^CMY%XUs9jnrrU0pClu(+L}t3=w#^6o;|}(O%cy#x4LjZZH1q*$X;nePbVE4Ruj~ha0EO zKNwDso99#XvuEN`AWs{Bi@gtxt-YhOy9C{FXD=O%vz-K;k$?ubhNqmple2Q5m%Uz~ zramCh1t4NaCnZTE4ibGLaI^QZp#izMx_gU)Bn$}9dm*VB;%os*A`rzjVfzrR1HKOd)umm?RCh=|BP9K5_7PY4e00Cyi75Qn=r z{eKwb?Y#kB&YnKb9_}>%FxuF9`1(lDJt_Uy6x=-jOY83a?=n3Vj0LBly^W8Dm%fLG z>wl`K?d0L(;qBz%Nh7BxK%-#;aCZOa_%B{VLsZ4x+sDQoV6P%CLHESK>FjJL%Eu=o zC@9Y_#G@c6$it(+FQO9uXOy|HR6B0DRr--F^NOYxjR*h5u*lKds>A z`IK4S-pkp~-cHfW!;R+eltrEYw-$l_$@lMAyZ^04@PEc~J&ED^XJP+;3;mx{Pu=s+ z@V{;QbnxHCw|9T)cCV+l_Rhg0diIRBPeoovAGCCkhmu7!e=!0j%CIc1U{;0rzhnzj zRH%Ot=y$J%$R~ap!UOQPkR*PGC6W<##xjgp8{rXFTPGUhD7@5RKexzmd%We{#b|6i z`?lh2^&{jx)SK#0PhPgi&eUZ0vBcGiH`@-FoRy{i3j{L(leZ-WVvvA2{XVGbnr9s* zG$JW*Sqd>q(BQkwNG{TIu68tN%oQnb6^FFNR~xPl$I zm|>W*j{xhT(g3sl-2z1KY@&qA0a~--8mlbo6MSY3Sy29DZRC=_#b9K&IcW(xbn3qD zali;DIL*NQ2a>E?#=CXQMk;2IJDpfLGR5_w?UEM;`!OQP>sJa904@JRBdgqw<{A-f zPODilVldJY3tG8mjj<9Cq%HNX;km>BP=EQ!_>VT)lC6`dm~$b&B*aCJ*_t6bQD*XIIA zrrq#>z~6ik=?Q&P-|3PvgPI@=_MRFRi5f&qlac?_B_cT$A11<`f;&+p^s(QUcKGMS zNYwS6+Y109HVx5PCw$%fR|2X^WJR_R&T>NOOaXhEOOBl@ACRbf{Q38g%!l_W!fCv{ zyn=GMr7&FEFtoISlT(_%iFGOyAW*%LTFx{?IMb~HaOTxco0(xXa`wb0B-{sjpkZ9F zbnZMIZIc!;=Qqv2^WY_d{p1IDf88Rxts3(SLO{5`#Xi5aUOr5);GFV06(V2G0%QE` zw{cbL@W!uuqA3n1q)>mMxU?wl*Pwndp(E*^iJ@$Hm4EfeJ`y=_@(E_@&+FH@D;5#% z%5izR;P_>FEfS3Nmq*3SI-GpsAP~&&m$citnCRwyK%Fs4!m6qG(fj((-y-2~&7)oQ z4#JKn4nA=SUWP)V&DUvjP#Hz?-yUdXY;@ zNlmhBn0p;i0j^5OqhqN%)6E;;VN5UVdzE$GmIS%ZKVBDViH>uKNOQ&Uq5yG0Dlp-V zTpnO8cV6#UAk z)?vp{kNcLNu9V6yaw#|j*h9p`zNZJMyYcx_9Zx@es61Md4Nc*y09>UV7@wE@EGya!%G<~=$Cg%(LWWrD<&NXYR$#UpU; zl-N8X3auH&u_czz`2@`)@9^Q(Z%i7Hf=u*EDPZM>R2Fk4J#Q=0-x+Y2G~abPx7&Ra z2NL1RzJ6GzOMmMRqU6 z$VT^YqYCg33>3Q}C1=wdL-qO~RY!>-RljOAeEMmD^wu(R)f~VT!$Ug{0mvR$s&%fPY=gWk9kNN8m)<5-VE?(DW&De z_K7#3AU;h7d9k4~t}aji!~JOUAShjMOMAIETdSX?IMsgoD0hRthVvFz_Pv zdB+jF*ZW#({d2~{sX9F*h~py)k>5uVOoN%aFYVn4R`h41lz|0c2VZIB=nppL5y=g> zu!5%WhCXBkP}Z@2N_Vz!AzjR@qHsS0JYuj-#`U;&ZpDXpK_mAhyos?3Q{PNOL0pmg zC+VYZt}AEuYBcotKWk`m>a(=zjXxDB3#5Um zVOPP7@tHWfoJhBge!5gA4xHSVT7cu2&GC^pQ`A)wCChhgTf&%uxo`T!dK!h-3`){W zpvJr6%XD*gpM-&tSGPXMc(X9$3n{M4OiY7A9Xmh?(uP=TgDFkP-egM4nbFfm?^>b$ zOW3Npm^VN^_io|YL=pYnX73Ft-K|c|A1*#YT?(+WskD4SwQN8cBq))xT(;M{@0~D8 zL`ANR>lb0mKLRtNENx&SAp>P7857a%ZP{0S3snYW+tbd!X-*{GL}**b@G};C z)Q3bSoD}bG=Jx$POx1UDzM= 
z`-IZDl+GJgv`ehIT0``{&WDsH3nEG03F1%AU(!=nGsjuyzcneB{{lp{>#5)ndCUO;OINf(7fpu|jyopb#q zlcAO8B?*00y0gq?{w~Rm#QuV^oj)tPcv!7-@bCr?Zk?hlTDK)}c8r_PG$e2Sxtqkw znT9qczCHX17&fsDl3Vm2V-Aarj3y0gN1oyt+l*_2>We#0j5b%9+SO=cHnf?jhBVL* zc#p)VMKXMa?+hxBt}v^^v`27e&jC%v7U zYKYuMhjG$Ix{NA9pgZ+vM>wy}WFw4vHwJAgeD0=m%D2|9gU5(o73(HHxx~ z$`tS4W>`?peBKOuh2OZWrn>N15K@lt?#^(;0WnTZ?_LtcuN$kZ4>wSZ(5iUWZ$`jTC z_ci7nCc@Rp`ZOBltEe^pK#3|uV{VnV_K305Q3%H-7{5pCjN#f=F$6GY0!$*`&2k!S zIddNLT9i~PSY$C(Vk}fNjSg5anR_qHRGpDH-%`M=-M#Uy)$8I8o`groI|!?V_x3%D z*jIq7JKZ%3t7W0A9=PatJ(#|9PuiW+t}h-&qnBZ5P*GhxNr~gqcYtmMghEcf1;N$b z?-KJjMQTx=;qx4;2QzXIHdtmV{?c(qZn=JMuV7*~^o}L0PZRG-cNY-v$m+tCNWA;qfeK|Ja$ z?dtZ+=kKMyDZQ?#yBJCu@vCPRGRG#W=#Uqy7gWdT#9=CV-aUP``ekX{im2fj$(ICH zrqyj>sx@=@VhTUP^u8#smC#HX@iA!B1&~*#t~u+7Nq74FS*V0Q0?u(R5}(HKHeXU| zaX6UE!_YCc0<@~U?km)OK|HeGDJuLE1en`EE(|f3b_8Kc>^KoR$h}C4y*efcDc79k z)u3b4(j8swz`YC~>rtU}6ui^r7(E_B<4DBV|5_E&6Rp|K-w*sw)y8zPZhwG05z^^w zLRAg*Our%j74=A`>3&;5GjxWvxa*y0L3)y#_vIKsT*HJxThAl=kcG%Qs?J-inZbh@ zq`FJ)@rN?G3!zzcyL6$GtD~<-+L`H#r!{AWlr~}E%2bRDzO|+VWq4@vyEP<&_QmKI7yfHm7c|~ zkdcGa5KJs;WE|^Wm#k^lqqyS>>?&VZTzP8uAppMl3)U|MmG^Sp-h8%HE>eK^IF3|u z6blQxe|+599-P{(w9u$@#Po)>v4I0!Sh_Zp$De)M6#l5 zMLd&@Q!>%r&X>3(dy1Sy?PO++U1`I)&{?M@Uo z%#2bAa3&rk<63k``;b?*UQ=TG&ME|}*pK;D6(8EIW`d64<`Ai~rNBrJ{k%38h0VrZ z)(*?!ceIz6p#l3bgLvo%tKy^07Gr2rg@|ENO0eGhf^tf4;XC)3w)a9%k-CFMjbN)`@oRUehd@f#YrH`!qtJ(}CQ8lR z+MUwQHG!ZjF=2+LRco1w;NA)|e&(F=;@5@~YvQ*}WwH|1 zW{l!fpO$_sGYm*FDc`WXx|&tI;x;P(o+0HlocYS>GuQ0YJ}uF5G$wr!TF%IET{Q4|>d}!k>Q%%+Z{vc^)k{}BmP<=f)KU-84}F(W3?QXO?M&M_+fH%H zP1RGVhy8_TH3xc5er1$IF9!{db){AF1?8D6r6x6UC#X=y=*ObiCe zZ|cKVcuN6?)kxDj?`&dz$0gLFecX{V&Au;2g)e>UH(kt49)MhGU9UX2($=TV6dnKe zCR!eldvubP@OGmDCuf$w`Jo*ml6I!*Z&(Oa{eaWP`8m*aE|7#?ovVrug{PNqINSdu z@u72)Vd`WJ6OYNAB#+hOE$k8B(PtN)wdfZ;ELi6(7IlI>Ir~TU<;xx4Tn0^Lm885k z!2|CbsSv##hl_!eoJ#>wpS`2KtE(5CZ!Hf~l*~7UMiIR+&UO9*juK5%YYJjtkERgP zggP=dxb4%E8W((`2g)%g?g>E+RZW)7*L)HMnl}Lnu;J?<6ODpm3RLPGq6Vl;z|aNp z5*5uzK$K)Bp{dY?A*8crtu--(0(l+bO&*>5!u!KQD+;nt(a~g^`=2T;v-g>ul$x_u zLcQ{AV+YeSFP`@OYqz>QCGH1>^M==xc=@-W?jSBT@vfSWgAluU7WT?eutjJ2$9ZSdl;^rlm2JPtQ%6@Y$l7(6B9 zlqVdq@F&qdugX5%1MkA<3y`rQM$#0zn1``Jaacc^tu(EL=wALU?vJ70Xwx&+^%@ab z;OsbwDLNe;#0Iv-_)%@b(BG3aEi4P?nhDFaEm@06YtqSK88&-%%KNKLjXM)jlt$0d z(q8vr_pCL!w|MrQ((|ceeWT@-V(H#9J;(%sS2B8f8}xNox|N@GD5loR?9+n2fWKZY zc(Y*>gX85*ALqgajeA^)lhbXRioH>St-U3|TRjZd87wh*%kX(J1H3jQhhtV+p3fcPQ>XQUKsF9mm zoH!0Sr&YY;%y1%&bJqhNV_vk;?sx~5__YLXe|G`Bd!GququTI(0J-~}A@a(HCwYmO zWj>cDZ4_FKb}1f&lN4TD2*1zVVhK*wFN*D6oRC-~%)GsE{(N>owOd z%1cRV&^^^z@YP_}sI0j+rz_3|Zk9B;z|^}WEhV^Bpm;=Uf9IpY5Fn6A|FO@j7Z8&B z96ZFHGbnNB^C(Vfa20auH(3;B>~V!Yon}t?kpi_J#_}@sKCrK4uY_Xf`p7hv`XQ=8 zWNp{9H3nF%DY43p1+@_OnTmXtj z%WgVqwJ!5UnSrBy?rhLiXKT?d}y73{iOJdN@mhf#J?H_awxEp#WUbKF{0}s=woC6Y47);j* z8rB1{w*AVT>0NSmFtEae;*67g8T_nxO0c+ov@>{eu5n{@#RGTr>^Bb8=wBEbB;0`7 zz|!xSHUh-AuPL^G!?~=j#GR%GzgKr%icju#i74clZV*{+CP!VXw1lVu78LdOSdw{V z{4*;Lt7ier$fJSEz6+QygOA+}x_4ilo(2pO&gO2#M3YigPU!~HbZzFpPP(m(7_Dq( z6E$iYyBlF8m8$F1Cuz4}csC&yn=cM8WVgfaL&h75{Shd3)~!cR zCrAVcxl!YrKl=V^piF14E39&aLJVb9-eT+g2xImTQ%l7;}SHq_(LSbo^EM-HXXtZ0O zdW3nm2Xc86CsIwEsbP>@Q~2ojkx)cvw^BKDjB5;4cJZr2KyPiMdSz9LK~+wi4%NKr zbN2DsiY=l;nH8!iP250F?V2V~z(9!|pVCyX9mL_@_ zlcc-NP!BZ_1zEf>pRi=1_Kqh(3X+M9b?No%R8SQvDbofi&Fz$Vs(U!_CusVn+==X` z4cUNCy9%^!gq7dHZ(d7yf82(&o(5y7mF`*OIvT28jRocQywzcRqsbN4HuB~hLSmiP z1-e(k^;S23LfRT&ykT>g@~+hOx!lg!Sf~$2v?1w2ja>QgaJtM|?p@SM9&ls$0J<8;>A`IHQY5INUj<+t`aZ}v)4 zTMv2I_QwzEM=Wg(QohmrlBbJ|jcKc6rM(eJ>_{Ce7!j7Wl-87@z;z5`*K8^*wY?^P zXZWbVI~{|7l7A`bsQ034<(8h(+iSK&8}ijuX4p=^0dk;0zaKuYr~S&idu-;u+p3y# 
zh&LfPIM%YArf&^E-XlY^y8hl$%bp>Gi+MuNLb0pOLODZ47f-(U&F8UH%lFk)H3Pg8 zGX$RR8odn{YWkC>IU_o}?Bgs(hY9Wy8?sIR0}Vgrg%#6#9%R$r^539t@SnujcyONj zpE?(`U`-_m!Nt>6WU8?;PR;ou0f`wuvuj1xX4j}4+M{ZmBHI>~O54)>S3Z}=gNpD= z-B$ESnoSp)Ib~)v6o{j~ZKMpo4IJYIwwCY%v9+$k%2a=ut+ETf&f;R4JYriH_yjfh zcF16FMV7{Bm~xVwCmSeQ>{H^VpmBwKi?xX5tMS?s%PV;WKlk>RF2_ zaQ#KT_9dmokkCTOdHzpHF5DT*Q$Z=`2&Z8*iEw|IL>%}ep?*ArUV@HuU70}fr}vsu z7ct2;mYIn^8+D@M!HHQVZamDm4kufo_&Lv2PQ+;2qON&of3i4Z`6^WdW!GxVHw*o( z9RCu?86CO{>RZqmkKJi#IZw5A|C&P3R7~+e1O|KX>AO!{L~~2Q^j{VcJ?fn1_JtHu zo#68?Z;9QhCQ%>Wl+v*xbCBkOYksQ3ErxKmI#@o+=yEv*{noTagX`J);d!Sqs6~1- z_t3kU4AG&!bh}$vq8bSpCgNXZ%R$m zvOkBz6;t?`*dmP4KpQa6S(Tb1v2UM_yTrv=nIeEr4bEdkEf&tcKxgqz=0#_b6#}=d z<1+YBT8K_dgbVSiDuNBJv!Zzw;~H`1CnOI;NRH;M5O3aN0V4|fV%s{@tfO&#!{~vE zXkC?8J?SKAwT&lDA&ld*Yz*V@55gw}#xX07=)to%1He+@{4HiU*{$`=4_`dDSl!dE zrb@kaTRT7dc#5TRzxH}})^%cZIN6|2;?tLujjh6Ku4c*Pw+2LJ{e43$piypJ3@{zz z{ZyQ_eCg6H#lsA4@F@ubKQ?$Sr!)(1u-g0Y@!Y3D0$d`L8{h{xE*7}P)$8&a||XD*TfFRvL{%LTfbnlB1i z`xZ=4^3YZ0(&j19vpsX0>pdpp@?^hP1Lua|`g^OU4F@JZvt-JBeIhxTzTB`_7Ha(C zXpMKEgjelG#+Z1pH3QN?T{LaXLXs&7drY%!CjC6=jey#;hs!{-|i#z2tEed4Ti=&S3x@^6XZrGR|k} znjEuABs|D(T|wc}%1sHwoY(yB{a6Ys6`5RKt#YYI&kJ0bNGe4P*Uq9}0YZR`s>=o) z$^kQp3e)J59I>B@@PGAi_X6G%Sved~($wM_il`m%ViYFIyuN(JJ|msKAXrNRV#341 z1|2JQNES0Z;*5kT&$YHc%^PE`bnRw~uILz)Jn z)rtYuuV1r^>4a@XS-a!^ETgu|Hbj0rKjU`uCKq2mWUW!kEocyb*qm8%j`6#5FX;H5 zH}?G7Z?<6e>UQ1ZW!lOfGLsiJ6Cmv5nnJCrOjaP?lKh2^41eXWTy*hxjZKwSr_VJ}-~$&#D3 zzhiEKdrOMKKU0O4xvH7-t>i*p@I!2=k5-G?6tO+uraKwk8#JkfX*#Z{*%i}i_x~lXo^+A!ibrcM>WX|z89iEn| zyC2#BpijrGcW&p}+^3j>Wt$A*=Jrvh8ETLM8aKVsi0&;hlS@-###$Xy))F)OMv57; zZdh4t?c_)zrcUIaOVOUk1$;wMCE>D~-O=N0NFI9^e^C}x37OgGLo)!Q zl=io=P5JDB<$lI%4Y+J3XEphD`qO&Kd_8!yc<*ECCAvC#XTpXe+6u_cmTjEJ| znoqk>=_ZZ4uO5-(m)F08ceF!p<}!?TgW`7279=mKmj~~5tj;zg?PgUz-)5VMM%0j%)T?pU<0Uk|D3p5{2e??#5jMB{Y!BJEFH zuWNq7jM!7<2zWCvPQRj%cXAC#;y_}2ul?h8L$gjQfeIy;;;WXDudit7Uv|Z2b;SrX zfetgr<80WRG+xgFc;C!8+A#ako200^e2Q~AmM2ENwvrd`El^q3CVWk8#pR}l6cCg~ zUYS?4ylI87x!WdHAgi(~ry661S05Qi1wbZZh3H*x{Rw|u!|$*brVLWole{Fe)at#5 z&|6f+nmc3oc&?6vkxR;joiAOb9VuypZ0J$RUBbNxlH~&My}W2{rLRnL z_-^!!5*@@mLvLnIN0QiIhGHHqzPd<3m6&`Vvw8X{6CQBzCaG00F|!`5<-vmAC>~F}0=9+5g-X4W2>mQBUE2eh0%g|SqINm6Te;DOFibuJZ*{m1m-=$li zA>OF0B&aPG^YmL#sfV^T*RCPN%5N9BL>0$sDyvtimKQ1W9gBJ=5(@^odQd1zJ)8Lo(zG zeg;Iwc}daKZlFmS1a-tPNNEfJ99rixy+0qS+Sm5iq zL+jh*2DCx)TBOktKeP!XXqS-sX*+N5l;5o1VpaD@M%Pak^Vqbsa_Eo0WNcXh8i zafO?AZFRj;yl(n{r6|&IBA_<(2I?rB(2@jt?Fv>m#>YoLznm1vhc1`weTd-;OKNlU z7eAu`QWzX1>w@I0VgfW#HL`x)yyghsLOaU(#V{i%@fmXs*QfgI)M>KgCz&&%`=PNZ zPu+yGi`h*t8-5KMsj5_yxl+d&O}k-3yJGaH4TJX)ynmlzXsKl%oOgmmFTRO-s`ckV z&u!9meAquxYhwk+gHo^`Q|*lIBH2K=|B*NDyfTf|*+wzNwSNZ2hkhakih?%7j(lPT zD;YT{1@b6F_gc~lu)m$%A9Eb*aK&Q@qrFOd-)-p{v7hkz2lg2jw=-pNt0yOAU(svi zLYL#99x*+EkqXq&U$tR)E{^73j>i*upyP+bN9CfUhi~MgD<%5{I+<#AWsg?a)U-af z&|(T&_pI1K{XL`TB94{Ou)PPi5Y+MbOb^}#nvWufpZWaDcRLGjsu}h_miC|C;Ors| z=3G3ILzSiI!nCg+;$03@KDrVVI`VxANUQz+09hW z{~WkYa@aKYcKD$MeY0x*7Sec0vr5BAj`1Ov&~s(J`O2>w{g%{Jq-lIT_L=68?J+E* zGGTu~fpOk97y&7_Diw3aL;G8#ku@_Hyb)LWa$+&s zEF~rPhKO&PraSlge{A(pz0+TTl9mN_uDi-)@vS9E8zK$1amRo!FM&6Ys)yQdvVSt? 
zd&vc0p2sNLeK7sJ7^QO9Xkp(Tm$9A!ml{~8K2#1711%(JGl8Eh9QYUDKEx@cv!JHg)>??HhpzbPA3DM&~U< ze~Rf!mHiBTPgT>F;L?v|Ymp&(l9!ZA&Mt9(uv}|zk8-{XfKyu7vYP#;ao1qBoecXG zs7P|7#x6hY;x|`wfR2^)K5ub~0ncUzK+Ybe)UnPC7iajN`lE-k73KK}UD zKzHTYGesC!j*8N598|aVJHKu;Qd&wK$pOh<2p%XS*W6`g#nH`{4mC<`Tm8tWUzn}AWi3+;%dy%2o{JaR5Qy)!>H z%gz0!Cx`4fqYzD`j6j=|L6X8+kHP1A*E0lNx2(ItObT73J3_eKE@=MB4=jMRRrw62 zG<8C+vWR^_5OLT~3Brb~kl1OQ5_pGlWb@Ulbtbkbg~d5y_X_mvTrZdJ`R2u?sF<7U zZv~d(&CJ-A72TvW_u`}1Z=|JAbP7kMUj`&-f$L>F7R;6ggDkC*jsf|P&oalP8U8fK zT_2wdY0JFNakO#`swMjx zM!cT4Z}M9M_60r_9>16xcaX^`A9gqPZ`l_3nb%}8T`Chs482ZkvJhPcGX?jMR}=ah zTZDVQSSASC6SiqO@{GT!Qk?JszB*o9FY#TP6Dko7-f4$6V16IQQ`bDNN^kJC2IR;t zY?SB&z67>8I0W=}iwTS;u3x6J_59+L8+<7^p24|fLiU+*HlGuF3@?Ppk+A-3MnmFl z)qZ;$wA_$w?+0srI|;Kh_%r5`bfl_d$kA>k$+avzku2rs<@<_TvP^;(tTuzj zhE_CzlafJ^=I2x-PY=Nl5R<=t%`qL1pvH4;}21B9;( zkl_bYZ2+YII)|5v`(DLhC^8SK&@Rg;W2>Er#Wa&~W~5#GeHRr{N`OC4&x8mdeH^(Z zSo~{uE-6NJ{V*qLT*hB@@O-Qm!r>wH*J1pN8Ht>Ri`CHLtL;2>NxDqFb41bk*1z+J zhV>B-vfA2MMCt)_#) z3G~quaUUm>*(ov1gX?+|@8-u$!zgCPz9kxLJH$2OO{(l${;)=ie$@*MH+Dtp83U5!%o~k zPQ8KRJ141&WM*HM=`hd+PDS93YX&}Sllg@j-BHpM?!v8!WeV^^4DX@GQ`sea*>H?=b|NHgB}D2V9jt) zJ=prm-}$6M+ZsPel4vwOBmuhqij3Ujz<~(=Z+%`0#*Vm+M8&7Up%ajiBU{{m!_%D9 z1zJjlE#0`HNju{ds8|+m7h{Hj5#iNXfrHNd}8lmEE zQSW{7z*8sq+W$*S6LniEU?Z!#B?GdWkjUeg4$&N$;$N7gqx*-E<^6-zhv(0nSsJz2 UWxWXg`G1#+f~I_}taaG`2PLnS&Hw-a literal 0 HcmV?d00001 diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/robots.txt b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/robots.txt new file mode 100644 index 0000000000..3c9c7c01f3 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/priv/static/robots.txt @@ -0,0 +1,5 @@ +# See http://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the next two lines: +# User-agent: * +# Disallow: / diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/env.sh.eex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/env.sh.eex new file mode 100755 index 0000000000..efeb7ffa27 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/env.sh.eex @@ -0,0 +1,13 @@ +#!/bin/sh + +# configure node for distributed erlang with IPV6 support +export ERL_AFLAGS="-proto_dist inet6_tcp" +export ECTO_IPV6="true" +export DNS_CLUSTER_QUERY="${FLY_APP_NAME}.internal" +export RELEASE_DISTRIBUTION="name" +export RELEASE_NODE="${FLY_APP_NAME}-${FLY_IMAGE_REF##*-}@${FLY_PRIVATE_IP}" + +# Uncomment to send crash dumps to stderr +# This can be useful for debugging, but may log sensitive information +# export ERL_CRASH_DUMP=/dev/stderr +# export ERL_CRASH_DUMP_BYTES=4096 diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate new file mode 100755 index 0000000000..5ff0ede193 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate @@ -0,0 +1,3 @@ +#!/bin/sh +cd -P -- "$(dirname -- "$0")" +exec ./hello_elixir eval HelloElixir.Release.migrate diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate.bat b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate.bat new file mode 100755 index 0000000000..e67fbfcc57 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/migrate.bat @@ -0,0 +1 @@ +call "%~dp0\hello_elixir" 
eval HelloElixir.Release.migrate diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server new file mode 100755 index 0000000000..2584ba6518 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server @@ -0,0 +1,3 @@ +#!/bin/sh +cd -P -- "$(dirname -- "$0")" +PHX_SERVER=true exec ./hello_elixir start diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server.bat b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server.bat new file mode 100755 index 0000000000..d28959474b --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/rel/overlays/bin/server.bat @@ -0,0 +1,2 @@ +set PHX_SERVER=true +call "%~dp0\hello_elixir" start diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/controllers/page_controller_test.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/controllers/page_controller_test.exs new file mode 100644 index 0000000000..9d038702dc --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/controllers/page_controller_test.exs @@ -0,0 +1,8 @@ +defmodule HelloElixirWeb.PageControllerTest do + use HelloElixirWeb.ConnCase + + test "GET /", %{conn: conn} do + conn = get(conn, "/") + assert html_response(conn, 200) =~ "Welcome to Phoenix!" + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/error_view_test.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/error_view_test.exs new file mode 100644 index 0000000000..5fd9a978e5 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/error_view_test.exs @@ -0,0 +1,14 @@ +defmodule HelloElixirWeb.ErrorViewTest do + use HelloElixirWeb.ConnCase, async: true + + # Bring render/3 and render_to_string/3 for testing custom views + import Phoenix.View + + test "renders 404.html" do + assert render_to_string(HelloElixirWeb.ErrorView, "404.html", []) == "Not Found" + end + + test "renders 500.html" do + assert render_to_string(HelloElixirWeb.ErrorView, "500.html", []) == "Internal Server Error" + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/layout_view_test.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/layout_view_test.exs new file mode 100644 index 0000000000..d2f4dde964 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/layout_view_test.exs @@ -0,0 +1,8 @@ +defmodule HelloElixirWeb.LayoutViewTest do + use HelloElixirWeb.ConnCase, async: true + + # When testing helpers, you may want to import Phoenix.HTML and + # use functions such as safe_to_string() to convert the helper + # result into an HTML string. 
+ # import Phoenix.HTML +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/page_view_test.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/page_view_test.exs new file mode 100644 index 0000000000..56f7b3f364 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/hello_elixir_web/views/page_view_test.exs @@ -0,0 +1,3 @@ +defmodule HelloElixirWeb.PageViewTest do + use HelloElixirWeb.ConnCase, async: true +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/channel_case.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/channel_case.ex new file mode 100644 index 0000000000..bd43e84840 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/channel_case.ex @@ -0,0 +1,36 @@ +defmodule HelloElixirWeb.ChannelCase do + @moduledoc """ + This module defines the test case to be used by + channel tests. + + Such tests rely on `Phoenix.ChannelTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use HelloElixirWeb.ChannelCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + # Import conveniences for testing with channels + import Phoenix.ChannelTest + import HelloElixirWeb.ChannelCase + + # The default endpoint for testing + @endpoint HelloElixirWeb.Endpoint + end + end + + setup tags do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(HelloElixir.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + :ok + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/conn_case.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/conn_case.ex new file mode 100644 index 0000000000..bb54608c26 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/conn_case.ex @@ -0,0 +1,39 @@ +defmodule HelloElixirWeb.ConnCase do + @moduledoc """ + This module defines the test case to be used by + tests that require setting up a connection. + + Such tests rely on `Phoenix.ConnTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use HelloElixirWeb.ConnCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # Import conveniences for testing with connections + import Plug.Conn + import Phoenix.ConnTest + import HelloElixirWeb.ConnCase + + alias HelloElixirWeb.Router.Helpers, as: Routes + + # The default endpoint for testing + @endpoint HelloElixirWeb.Endpoint + end + end + + setup tags do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(HelloElixir.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + {:ok, conn: Phoenix.ConnTest.build_conn()} + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/data_case.ex b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/data_case.ex new file mode 100644 index 0000000000..ec92db34bc --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/support/data_case.ex @@ -0,0 +1,51 @@ +defmodule HelloElixir.DataCase do + @moduledoc """ + This module defines the setup for tests requiring + access to the application's data layer. + + You may define functions here to be used as helpers in + your tests. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use HelloElixir.DataCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + alias HelloElixir.Repo + + import Ecto + import Ecto.Changeset + import Ecto.Query + import HelloElixir.DataCase + end + end + + setup tags do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(HelloElixir.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + :ok + end + + @doc """ + A helper that transforms changeset errors into a map of messages. + + assert {:error, changeset} = Accounts.create_user(%{password: "short"}) + assert "password is too short" in errors_on(changeset).password + assert %{password: ["password is too short"]} = errors_on(changeset) + + """ + def errors_on(changeset) do + Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> + Regex.replace(~r"%{(\w+)}", message, fn _, key -> + opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + end) + end) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/test_helper.exs b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/test_helper.exs new file mode 100644 index 0000000000..f69e09f589 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/test/test_helper.exs @@ -0,0 +1,2 @@ +ExUnit.start() +Ecto.Adapters.SQL.Sandbox.mode(HelloElixir.Repo, :manual) diff --git a/test/fixtures/deploy-phoenix-sqlite/.dockerignore b/test/fixtures/deploy-phoenix-sqlite/.dockerignore new file mode 100644 index 0000000000..61a73933c8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/.dockerignore @@ -0,0 +1,45 @@ +# This file excludes paths from the Docker build context. +# +# By default, Docker's build context includes all files (and folders) in the +# current directory. Even if a file isn't copied into the container it is still sent to +# the Docker daemon. +# +# There are multiple reasons to exclude files from the build context: +# +# 1. Prevent nested folders from being copied into the container (ex: exclude +# /assets/node_modules when copying /assets) +# 2. 
Reduce the size of the build context and improve build time (ex. /build, /deps, /doc) +# 3. Avoid sending files containing sensitive information +# +# More information on using .dockerignore is available here: +# https://docs.docker.com/engine/reference/builder/#dockerignore-file + +.dockerignore + +# Ignore git, but keep git HEAD and refs to access current commit hash if needed: +# +# $ cat .git/HEAD | awk '{print ".git/"$2}' | xargs cat +# d0b8727759e1e0e7aa3d41707d12376e373d5ecc +.git +!.git/HEAD +!.git/refs + +# Common development/test artifacts +/cover/ +/doc/ +/test/ +/tmp/ +.elixir_ls + +# Mix artifacts +/_build/ +/deps/ +*.ez + +# Generated on crash by the VM +erl_crash.dump + +# Static artifacts - These should be fetched and built inside the Docker image +/assets/node_modules/ +/priv/static/assets/ +/priv/static/cache_manifest.json diff --git a/test/fixtures/deploy-phoenix-sqlite/.formatter.exs b/test/fixtures/deploy-phoenix-sqlite/.formatter.exs new file mode 100644 index 0000000000..ef8840ce6f --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/.formatter.exs @@ -0,0 +1,6 @@ +[ + import_deps: [:ecto, :ecto_sql, :phoenix], + subdirectories: ["priv/*/migrations"], + plugins: [Phoenix.LiveView.HTMLFormatter], + inputs: ["*.{heex,ex,exs}", "{config,lib,test}/**/*.{heex,ex,exs}", "priv/*/seeds.exs"] +] diff --git a/test/fixtures/deploy-phoenix-sqlite/.gitignore b/test/fixtures/deploy-phoenix-sqlite/.gitignore new file mode 100644 index 0000000000..84ddde453e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/.gitignore @@ -0,0 +1,41 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where 3rd-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Temporary files, for example, from tests. +/tmp/ + +# Ignore package tarball (built via "mix hex.build"). +deploy_phoenix_sqlite-*.tar + +# Ignore assets that are produced by build tools. +/priv/static/assets/ + +# Ignore digested assets cache. +/priv/static/cache_manifest.json + +# In case you use Node.js/npm, you want to ignore these. +npm-debug.log +/assets/node_modules/ + +# Database files +*.db +*.db-* + diff --git a/test/fixtures/deploy-phoenix-sqlite/Dockerfile b/test/fixtures/deploy-phoenix-sqlite/Dockerfile new file mode 100644 index 0000000000..bd94820b61 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/Dockerfile @@ -0,0 +1,97 @@ +# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian +# instead of Alpine to avoid DNS resolution issues in production. 
+# +# https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=ubuntu +# https://hub.docker.com/_/ubuntu?tab=tags +# +# This file is based on these images: +# +# - https://hub.docker.com/r/hexpm/elixir/tags - for the build image +# - https://hub.docker.com/_/debian?tab=tags&page=1&name=bullseye-20240904-slim - for the release image +# - https://pkgs.org/ - resource for finding needed packages +# - Ex: hexpm/elixir:1.16.3-erlang-26.2.5.2-debian-bullseye-20240904-slim +# +ARG ELIXIR_VERSION=1.16.3 +ARG OTP_VERSION=26.2.5.2 +ARG DEBIAN_VERSION=bullseye-20240904-slim + +ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" +ARG RUNNER_IMAGE="debian:${DEBIAN_VERSION}" + +FROM ${BUILDER_IMAGE} as builder + +# install build dependencies +RUN apt-get update -y && apt-get install -y build-essential git \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# prepare build dir +WORKDIR /app + +# install hex + rebar +RUN mix local.hex --force && \ + mix local.rebar --force + +# set build ENV +ENV MIX_ENV="prod" + +# install mix dependencies +COPY mix.exs mix.lock ./ +RUN mix deps.get --only $MIX_ENV +RUN mkdir config + +# copy compile-time config files before we compile dependencies +# to ensure any relevant config change will trigger the dependencies +# to be re-compiled. +COPY config/config.exs config/${MIX_ENV}.exs config/ +RUN mix deps.compile + +COPY priv priv + +COPY lib lib + +COPY assets assets + +# compile assets +RUN mix assets.deploy + +# Compile the release +RUN mix compile + +# Changes to config/runtime.exs don't require recompiling the code +COPY config/runtime.exs config/ + +COPY rel rel +RUN mix release + +# start a new build stage so that the final image will only contain +# the compiled release and other runtime necessities +FROM ${RUNNER_IMAGE} + +RUN apt-get update -y && \ + apt-get install -y libstdc++6 openssl libncurses5 locales ca-certificates \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# Set the locale +RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen + +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +WORKDIR "/app" +RUN chown nobody /app + +# set runner ENV +ENV MIX_ENV="prod" + +# Only copy the final release from the build stage +COPY --from=builder --chown=nobody:root /app/_build/${MIX_ENV}/rel/deploy_phoenix_sqlite ./ + +USER nobody + +# If using an environment that doesn't automatically reap zombie processes, it is +# advised to add an init process such as tini via `apt-get install` +# above and adding an entrypoint. See https://github.com/krallin/tini for details +# ENTRYPOINT ["/tini", "--"] + +CMD ["/app/bin/server"] diff --git a/test/fixtures/deploy-phoenix-sqlite/README.md b/test/fixtures/deploy-phoenix-sqlite/README.md new file mode 100644 index 0000000000..e884a20a52 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/README.md @@ -0,0 +1,18 @@ +# DeployPhoenixSqlite + +To start your Phoenix server: + + * Run `mix setup` to install and setup dependencies + * Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server` + +Now you can visit [`localhost:4000`](http://localhost:4000) from your browser. + +Ready to run in production? Please [check our deployment guides](https://hexdocs.pm/phoenix/deployment.html). 
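+
+If you want to try the production release locally, here is a rough sketch (the
+database path below is only a placeholder, and `mix phx.gen.secret` supplies a
+throwaway secret):
+
+  * Build it with `MIX_ENV=prod mix assets.deploy` followed by `MIX_ENV=prod mix release`
+  * Start it with `DATABASE_PATH=/tmp/deploy_phoenix_sqlite.db SECRET_KEY_BASE=$(mix phx.gen.secret) PHX_SERVER=true _build/prod/rel/deploy_phoenix_sqlite/bin/deploy_phoenix_sqlite start`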
+ +## Learn more + + * Official website: https://www.phoenixframework.org/ + * Guides: https://hexdocs.pm/phoenix/overview.html + * Docs: https://hexdocs.pm/phoenix + * Forum: https://elixirforum.com/c/phoenix-forum + * Source: https://github.com/phoenixframework/phoenix diff --git a/test/fixtures/deploy-phoenix-sqlite/assets/css/app.css b/test/fixtures/deploy-phoenix-sqlite/assets/css/app.css new file mode 100644 index 0000000000..378c8f9056 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/assets/css/app.css @@ -0,0 +1,5 @@ +@import "tailwindcss/base"; +@import "tailwindcss/components"; +@import "tailwindcss/utilities"; + +/* This file is for your main application CSS */ diff --git a/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js b/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js new file mode 100644 index 0000000000..d5e278afe5 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js @@ -0,0 +1,44 @@ +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. +// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "../vendor/some-package.js" +// +// Alternatively, you can `npm install some-package --prefix assets` and import +// them using a path starting with the package name: +// +// import "some-package" +// + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. +import {Socket} from "phoenix" +import {LiveSocket} from "phoenix_live_view" +import topbar from "../vendor/topbar" + +let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") +let liveSocket = new LiveSocket("/live", Socket, { + longPollFallbackMs: 2500, + params: {_csrf_token: csrfToken} +}) + +// Show progress bar on live navigation and form submits +topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) +window.addEventListener("phx:page-loading-start", _info => topbar.show(300)) +window.addEventListener("phx:page-loading-stop", _info => topbar.hide()) + +// connect if there are any LiveViews on the page +liveSocket.connect() + +// expose liveSocket on window for web console debug logs and latency simulation: +// >> liveSocket.enableDebug() +// >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session +// >> liveSocket.disableLatencySim() +window.liveSocket = liveSocket + diff --git a/test/fixtures/deploy-phoenix-sqlite/assets/tailwind.config.js b/test/fixtures/deploy-phoenix-sqlite/assets/tailwind.config.js new file mode 100644 index 0000000000..5cc1428a83 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/assets/tailwind.config.js @@ -0,0 +1,74 @@ +// See the Tailwind configuration guide for advanced usage +// https://tailwindcss.com/docs/configuration + +const plugin = require("tailwindcss/plugin") +const fs = require("fs") +const path = require("path") + +module.exports = { + content: [ + "./js/**/*.js", + "../lib/deploy_phoenix_sqlite_web.ex", + "../lib/deploy_phoenix_sqlite_web/**/*.*ex" + ], + theme: { + extend: { + colors: { + brand: "#FD4F00", + } + }, + }, + plugins: [ + require("@tailwindcss/forms"), + // Allows prefixing tailwind classes with LiveView classes to add rules + // only when LiveView classes are applied, for example: + // + //
+ // + plugin(({addVariant}) => addVariant("phx-click-loading", [".phx-click-loading&", ".phx-click-loading &"])), + plugin(({addVariant}) => addVariant("phx-submit-loading", [".phx-submit-loading&", ".phx-submit-loading &"])), + plugin(({addVariant}) => addVariant("phx-change-loading", [".phx-change-loading&", ".phx-change-loading &"])), + + // Embeds Heroicons (https://heroicons.com) into your app.css bundle + // See your `CoreComponents.icon/1` for more information. + // + plugin(function({matchComponents, theme}) { + let iconsDir = path.join(__dirname, "../deps/heroicons/optimized") + let values = {} + let icons = [ + ["", "/24/outline"], + ["-solid", "/24/solid"], + ["-mini", "/20/solid"], + ["-micro", "/16/solid"] + ] + icons.forEach(([suffix, dir]) => { + fs.readdirSync(path.join(iconsDir, dir)).forEach(file => { + let name = path.basename(file, ".svg") + suffix + values[name] = {name, fullPath: path.join(iconsDir, dir, file)} + }) + }) + matchComponents({ + "hero": ({name, fullPath}) => { + let content = fs.readFileSync(fullPath).toString().replace(/\r?\n|\r/g, "") + let size = theme("spacing.6") + if (name.endsWith("-mini")) { + size = theme("spacing.5") + } else if (name.endsWith("-micro")) { + size = theme("spacing.4") + } + return { + [`--hero-${name}`]: `url('data:image/svg+xml;utf8,${content}')`, + "-webkit-mask": `var(--hero-${name})`, + "mask": `var(--hero-${name})`, + "mask-repeat": "no-repeat", + "background-color": "currentColor", + "vertical-align": "middle", + "display": "inline-block", + "width": size, + "height": size + } + } + }, {values}) + }) + ] +} diff --git a/test/fixtures/deploy-phoenix-sqlite/assets/vendor/topbar.js b/test/fixtures/deploy-phoenix-sqlite/assets/vendor/topbar.js new file mode 100644 index 0000000000..41957274d7 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/assets/vendor/topbar.js @@ -0,0 +1,165 @@ +/** + * @license MIT + * topbar 2.0.0, 2023-02-04 + * https://buunguyen.github.io/topbar + * Copyright (c) 2021 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + // https://gist.github.com/paulirish/1579671 + (function () { + var lastTime = 0; + var vendors = ["ms", "moz", "webkit", "o"]; + for (var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { + window.requestAnimationFrame = + window[vendors[x] + "RequestAnimationFrame"]; + window.cancelAnimationFrame = + window[vendors[x] + "CancelAnimationFrame"] || + window[vendors[x] + "CancelRequestAnimationFrame"]; + } + if (!window.requestAnimationFrame) + window.requestAnimationFrame = function (callback, element) { + var currTime = new Date().getTime(); + var timeToCall = Math.max(0, 16 - (currTime - lastTime)); + var id = window.setTimeout(function () { + callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + if (!window.cancelAnimationFrame) + window.cancelAnimationFrame = function (id) { + clearTimeout(id); + }; + })(); + + var canvas, + currentProgress, + showing, + progressTimerId = null, + fadeTimerId = null, + delayTimerId = null, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": "rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + 
shadowBlur: 10, + shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + document.body.appendChild(canvas); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function (delay) { + if (showing) return; + if (delay) { + if (delayTimerId) return; + delayTimerId = setTimeout(() => topbar.show(), delay); + } else { + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + clearTimeout(delayTimerId); + delayTimerId = null; + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/test/fixtures/deploy-phoenix-sqlite/config/config.exs b/test/fixtures/deploy-phoenix-sqlite/config/config.exs new file mode 100644 index 0000000000..94e9540868 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/config/config.exs @@ -0,0 +1,66 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. +# +# This configuration file is loaded before any dependency and +# is restricted to this project. 
+ +# General application configuration +import Config + +config :deploy_phoenix_sqlite, + ecto_repos: [DeployPhoenixSqlite.Repo], + generators: [timestamp_type: :utc_datetime] + +# Configures the endpoint +config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + url: [host: "localhost"], + adapter: Bandit.PhoenixAdapter, + render_errors: [ + formats: [html: DeployPhoenixSqliteWeb.ErrorHTML, json: DeployPhoenixSqliteWeb.ErrorJSON], + layout: false + ], + pubsub_server: DeployPhoenixSqlite.PubSub, + live_view: [signing_salt: "Hyy5zhX4"] + +# Configures the mailer +# +# By default it uses the "Local" adapter which stores the emails +# locally. You can see the emails in your browser, at "/dev/mailbox". +# +# For production it's recommended to configure a different adapter +# at the `config/runtime.exs`. +config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Mailer, adapter: Swoosh.Adapters.Local + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.17.11", + deploy_phoenix_sqlite: [ + args: + ~w(js/app.js --bundle --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + +# Configure tailwind (the version is required) +config :tailwind, + version: "3.4.3", + deploy_phoenix_sqlite: [ + args: ~w( + --config=tailwind.config.js + --input=css/app.css + --output=../priv/static/assets/app.css + ), + cd: Path.expand("../assets", __DIR__) + ] + +# Configures Elixir's Logger +config :logger, :console, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. +import_config "#{config_env()}.exs" diff --git a/test/fixtures/deploy-phoenix-sqlite/config/dev.exs b/test/fixtures/deploy-phoenix-sqlite/config/dev.exs new file mode 100644 index 0000000000..f4c700fa29 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/config/dev.exs @@ -0,0 +1,83 @@ +import Config + +# Configure your database +config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Repo, + database: Path.expand("../deploy_phoenix_sqlite_dev.db", __DIR__), + pool_size: 5, + stacktrace: true, + show_sensitive_data_on_connection_error: true + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. For example, we can use it +# to bundle .js and .css sources. +config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + # Binding to loopback ipv4 address prevents access from other machines. + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. + http: [ip: {127, 0, 0, 1}, port: 4000], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: "+Q20TRJg/2MPOY/hSP7py5ijAj3Rfbi5QjJx/p76zsGhIoYaCUb02X98p7Fj1DOK", + watchers: [ + esbuild: + {Esbuild, :install_and_run, [:deploy_phoenix_sqlite, ~w(--sourcemap=inline --watch)]}, + tailwind: {Tailwind, :install_and_run, [:deploy_phoenix_sqlite, ~w(--watch)]} + ] + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Run `mix help phx.gen.cert` for more information. 
+# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Watch static and templates for browser reloading. +config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/(?!uploads/).*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"priv/gettext/.*(po)$", + ~r"lib/deploy_phoenix_sqlite_web/(controllers|live|components)/.*(ex|heex)$" + ] + ] + +# Enable dev routes for dashboard and mailbox +config :deploy_phoenix_sqlite, dev_routes: true + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. +config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime + +config :phoenix_live_view, + # Include HEEx debug annotations as HTML comments in rendered markup + debug_heex_annotations: true, + # Enable helpful, but potentially expensive runtime checks + enable_expensive_runtime_checks: true + +# Disable swoosh api client as it is only required for production adapters. +config :swoosh, :api_client, false diff --git a/test/fixtures/deploy-phoenix-sqlite/config/prod.exs b/test/fixtures/deploy-phoenix-sqlite/config/prod.exs new file mode 100644 index 0000000000..3f98fb85a6 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/config/prod.exs @@ -0,0 +1,21 @@ +import Config + +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix assets.deploy` task, +# which you should run after static files are built and +# before starting your production server. +config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + cache_static_manifest: "priv/static/cache_manifest.json" + +# Configures Swoosh API Client +config :swoosh, api_client: Swoosh.ApiClient.Finch, finch_name: DeployPhoenixSqlite.Finch + +# Disable Swoosh Local Memory Storage +config :swoosh, local: false + +# Do not print debug messages in production +config :logger, level: :info + +# Runtime production configuration, including reading +# of environment variables, is done on config/runtime.exs. diff --git a/test/fixtures/deploy-phoenix-sqlite/config/runtime.exs b/test/fixtures/deploy-phoenix-sqlite/config/runtime.exs new file mode 100644 index 0000000000..639e851271 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/config/runtime.exs @@ -0,0 +1,113 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. 
+ +# ## Using releases +# +# If you use `mix release`, you need to explicitly enable the server +# by passing the PHX_SERVER=true when you start it: +# +# PHX_SERVER=true bin/deploy_phoenix_sqlite start +# +# Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` +# script that automatically sets the env var above. +if System.get_env("PHX_SERVER") do + config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, server: true +end + +if config_env() == :prod do + database_path = + System.get_env("DATABASE_PATH") || + raise """ + environment variable DATABASE_PATH is missing. + For example: /etc/deploy_phoenix_sqlite/deploy_phoenix_sqlite.db + """ + + config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Repo, + database: database_path, + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "5") + + # The secret key base is used to sign/encrypt cookies and other secrets. + # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. + secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + host = System.get_env("PHX_HOST") || "example.com" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :deploy_phoenix_sqlite, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") + + config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + url: [host: host, port: 443, scheme: "https"], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/bandit/Bandit.html#t:options/0 + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0}, + port: port + ], + secret_key_base: secret_key_base + + # ## SSL Support + # + # To get SSL working, you will need to add the `https` key + # to your endpoint configuration: + # + # config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + # https: [ + # ..., + # port: 443, + # cipher_suite: :strong, + # keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), + # certfile: System.get_env("SOME_APP_SSL_CERT_PATH") + # ] + # + # The `cipher_suite` is set to `:strong` to support only the + # latest and more secure SSL ciphers. This means old browsers + # and clients may not be supported. You can set it to + # `:compatible` for wider support. + # + # `:keyfile` and `:certfile` expect an absolute path to the key + # and cert in disk or a relative path inside priv, for example + # "priv/ssl/server.key". For all supported SSL configuration + # options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 + # + # We also recommend setting `force_ssl` in your config/prod.exs, + # ensuring no data is ever sent via http, always redirecting to https: + # + # config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + # force_ssl: [hsts: true] + # + # Check `Plug.SSL` for all available options in `force_ssl`. + + # ## Configuring the mailer + # + # In production you need to configure the mailer to use a different adapter. + # Also, you may need to configure the Swoosh API client of your choice if you + # are not using SMTP. 
Here is an example of the configuration: + # + # config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Mailer, + # adapter: Swoosh.Adapters.Mailgun, + # api_key: System.get_env("MAILGUN_API_KEY"), + # domain: System.get_env("MAILGUN_DOMAIN") + # + # For this example you need include a HTTP client required by Swoosh API client. + # Swoosh supports Hackney and Finch out of the box: + # + # config :swoosh, :api_client, Swoosh.ApiClient.Hackney + # + # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details. +end diff --git a/test/fixtures/deploy-phoenix-sqlite/config/test.exs b/test/fixtures/deploy-phoenix-sqlite/config/test.exs new file mode 100644 index 0000000000..f89079f50f --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/config/test.exs @@ -0,0 +1,34 @@ +import Config + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Repo, + database: Path.expand("../deploy_phoenix_sqlite_test.db", __DIR__), + pool_size: 5, + pool: Ecto.Adapters.SQL.Sandbox + +# We don't run a server during test. If one is required, +# you can enable the server option below. +config :deploy_phoenix_sqlite, DeployPhoenixSqliteWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "5u0cTq865n2ADnxzovY4YTPMuAoh2ed/bd7cagcv5jkADli701+c4tcl/H7Hqmp3", + server: false + +# In test we don't send emails +config :deploy_phoenix_sqlite, DeployPhoenixSqlite.Mailer, adapter: Swoosh.Adapters.Test + +# Disable swoosh api client as it is only required for production adapters +config :swoosh, :api_client, false + +# Print only warnings and errors during test +config :logger, level: :warning + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime + +# Enable helpful, but potentially expensive runtime checks +config :phoenix_live_view, + enable_expensive_runtime_checks: true diff --git a/test/fixtures/deploy-phoenix-sqlite/fly.toml b/test/fixtures/deploy-phoenix-sqlite/fly.toml new file mode 100644 index 0000000000..3677b7bbf3 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/fly.toml @@ -0,0 +1,37 @@ +# fly.toml app configuration file generated for deploy-phoenix-sqlite on 2024-12-09T11:23:11-03:00 +# +# See https://fly.io/docs/reference/configuration/ for information about how to use this file. +# + +kill_signal = 'SIGTERM' + +[build] + +[env] + DATABASE_PATH = '/mnt/name/name.db' + PHX_HOST = 'deploy-phoenix-sqlite.fly.dev' + PORT = '8080' + SECRET_KEY_BASE = '/28BVC30oMsrUtq0VMBmfxF7zQhjEELRUoNtJOvyEOj7P5YbB7FN6S47KkWyQNcv' + +[[mounts]] + source = 'name' + destination = '/mnt/name' + +[http_service] + internal_port = 8080 + force_https = true + auto_stop_machines = 'stop' + auto_start_machines = true + min_machines_running = 0 + processes = ['app'] + + [http_service.concurrency] + type = 'connections' + hard_limit = 1000 + soft_limit = 1000 + +[[vm]] + memory = '1gb' + cpu_kind = 'shared' + cpus = 1 + memory_mb = 1024 diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite.ex new file mode 100644 index 0000000000..eb4cc3ea9d --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite.ex @@ -0,0 +1,9 @@ +defmodule DeployPhoenixSqlite do + @moduledoc """ + DeployPhoenixSqlite keeps the contexts that define your domain + and business logic. 
+ + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/application.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/application.ex new file mode 100644 index 0000000000..1f75b26149 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/application.ex @@ -0,0 +1,44 @@ +defmodule DeployPhoenixSqlite.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + children = [ + DeployPhoenixSqliteWeb.Telemetry, + DeployPhoenixSqlite.Repo, + {Ecto.Migrator, + repos: Application.fetch_env!(:deploy_phoenix_sqlite, :ecto_repos), + skip: skip_migrations?()}, + {DNSCluster, query: Application.get_env(:deploy_phoenix_sqlite, :dns_cluster_query) || :ignore}, + {Phoenix.PubSub, name: DeployPhoenixSqlite.PubSub}, + # Start the Finch HTTP client for sending emails + {Finch, name: DeployPhoenixSqlite.Finch}, + # Start a worker by calling: DeployPhoenixSqlite.Worker.start_link(arg) + # {DeployPhoenixSqlite.Worker, arg}, + # Start to serve requests, typically the last entry + DeployPhoenixSqliteWeb.Endpoint + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: DeployPhoenixSqlite.Supervisor] + Supervisor.start_link(children, opts) + end + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. + @impl true + def config_change(changed, _new, removed) do + DeployPhoenixSqliteWeb.Endpoint.config_change(changed, removed) + :ok + end + + defp skip_migrations?() do + # By default, sqlite migrations are run when using a release + System.get_env("RELEASE_NAME") != nil + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/mailer.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/mailer.ex new file mode 100644 index 0000000000..58949297de --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/mailer.ex @@ -0,0 +1,3 @@ +defmodule DeployPhoenixSqlite.Mailer do + use Swoosh.Mailer, otp_app: :deploy_phoenix_sqlite +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/release.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/release.ex new file mode 100644 index 0000000000..7e61de03e0 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/release.ex @@ -0,0 +1,28 @@ +defmodule DeployPhoenixSqlite.Release do + @moduledoc """ + Used for executing DB release tasks when run in production without Mix + installed. 
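+
+  It is typically invoked through the generated release binary, for example:
+
+      bin/deploy_phoenix_sqlite eval "DeployPhoenixSqlite.Release.migrate"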
+ """ + @app :deploy_phoenix_sqlite + + def migrate do + load_app() + + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + load_app() + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.fetch_env!(@app, :ecto_repos) + end + + defp load_app do + Application.load(@app) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/repo.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/repo.ex new file mode 100644 index 0000000000..d79bab643c --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite/repo.ex @@ -0,0 +1,5 @@ +defmodule DeployPhoenixSqlite.Repo do + use Ecto.Repo, + otp_app: :deploy_phoenix_sqlite, + adapter: Ecto.Adapters.SQLite3 +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web.ex new file mode 100644 index 0000000000..5ebd1613a2 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web.ex @@ -0,0 +1,113 @@ +defmodule DeployPhoenixSqliteWeb do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, components, channels, and so on. + + This can be used in your application as: + + use DeployPhoenixSqliteWeb, :controller + use DeployPhoenixSqliteWeb, :html + + The definitions below will be executed for every controller, + component, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. Instead, define additional modules and import + those modules here. 
+ """ + + def static_paths, do: ~w(assets fonts images favicon.ico robots.txt) + + def router do + quote do + use Phoenix.Router, helpers: false + + # Import common connection and controller functions to use in pipelines + import Plug.Conn + import Phoenix.Controller + import Phoenix.LiveView.Router + end + end + + def channel do + quote do + use Phoenix.Channel + end + end + + def controller do + quote do + use Phoenix.Controller, + formats: [:html, :json], + layouts: [html: DeployPhoenixSqliteWeb.Layouts] + + import Plug.Conn + import DeployPhoenixSqliteWeb.Gettext + + unquote(verified_routes()) + end + end + + def live_view do + quote do + use Phoenix.LiveView, + layout: {DeployPhoenixSqliteWeb.Layouts, :app} + + unquote(html_helpers()) + end + end + + def live_component do + quote do + use Phoenix.LiveComponent + + unquote(html_helpers()) + end + end + + def html do + quote do + use Phoenix.Component + + # Import convenience functions from controllers + import Phoenix.Controller, + only: [get_csrf_token: 0, view_module: 1, view_template: 1] + + # Include general helpers for rendering HTML + unquote(html_helpers()) + end + end + + defp html_helpers do + quote do + # HTML escaping functionality + import Phoenix.HTML + # Core UI components and translation + import DeployPhoenixSqliteWeb.CoreComponents + import DeployPhoenixSqliteWeb.Gettext + + # Shortcut for generating JS commands + alias Phoenix.LiveView.JS + + # Routes generation with the ~p sigil + unquote(verified_routes()) + end + end + + def verified_routes do + quote do + use Phoenix.VerifiedRoutes, + endpoint: DeployPhoenixSqliteWeb.Endpoint, + router: DeployPhoenixSqliteWeb.Router, + statics: DeployPhoenixSqliteWeb.static_paths() + end + end + + @doc """ + When used, dispatch to the appropriate controller/live_view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/core_components.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/core_components.ex new file mode 100644 index 0000000000..15a1d1f2c7 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/core_components.ex @@ -0,0 +1,676 @@ +defmodule DeployPhoenixSqliteWeb.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as modals, tables, and + forms. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The default components use Tailwind CSS, a utility-first CSS framework. + See the [Tailwind CSS documentation](https://tailwindcss.com) to learn + how to customize them or feel free to swap in another framework altogether. + + Icons are provided by [heroicons](https://heroicons.com). See `icon/1` for usage. + """ + use Phoenix.Component + + alias Phoenix.LiveView.JS + import DeployPhoenixSqliteWeb.Gettext + + @doc """ + Renders a modal. + + ## Examples + + <.modal id="confirm-modal"> + This is a modal. + + + JS commands may be passed to the `:on_cancel` to configure + the closing/cancel event, for example: + + <.modal id="confirm" on_cancel={JS.navigate(~p"/posts")}> + This is another modal. 
+ + + """ + attr :id, :string, required: true + attr :show, :boolean, default: false + attr :on_cancel, JS, default: %JS{} + slot :inner_block, required: true + + def modal(assigns) do + ~H""" + + """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ <.label for={@id}><%= @label %> + + <.error :for={msg <- @errors}><%= msg %> +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ <.label for={@id}><%= @label %> + + <.error :for={msg <- @errors}><%= msg %> +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ <.label for={@id}><%= @label %> + + <.error :for={msg <- @errors}><%= msg %> +
+ """ + end + + @doc """ + Renders a label. + """ + attr :for, :string, default: nil + slot :inner_block, required: true + + def label(assigns) do + ~H""" + + """ + end + + @doc """ + Generates a generic error message. + """ + slot :inner_block, required: true + + def error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle-mini" class="mt-0.5 h-5 w-5 flex-none" /> + <%= render_slot(@inner_block) %> +

+ """ + end + + @doc """ + Renders a header with title. + """ + attr :class, :string, default: nil + + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ <%= render_slot(@inner_block) %> +

+

+ <%= render_slot(@subtitle) %> +

+
+
<%= render_slot(@actions) %>
+
+ """ + end + + @doc ~S""" + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id"><%= user.id %> + <:col :let={user} label="username"><%= user.username %> + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" +
+ + + + + + + + + + + + + +
<%= col[:label] %> + <%= gettext("Actions") %> +
+
+ + + <%= render_slot(col, @row_item.(row)) %> + +
+
+
+ + + <%= render_slot(action, @row_item.(row)) %> + +
+
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title"><%= @post.title %> + <:item title="Views"><%= @post.views %> + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
+
+
+
<%= item.title %>
+
<%= render_slot(item) %>
+
+
+
+ """ + end + + @doc """ + Renders a back navigation link. + + ## Examples + + <.back navigate={~p"/posts"}>Back to posts + """ + attr :navigate, :any, required: true + slot :inner_block, required: true + + def back(assigns) do + ~H""" +
+ <.link + navigate={@navigate} + class="text-sm font-semibold leading-6 text-zinc-900 hover:text-zinc-700" + > + <.icon name="hero-arrow-left-solid" class="h-3 w-3" /> + <%= render_slot(@inner_block) %> + +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from the `deps/heroicons` directory and bundled within + your compiled app.css by the plugin in your `assets/tailwind.config.js`. + + ## Examples + + <.icon name="hero-x-mark-solid" /> + <.icon name="hero-arrow-path" class="ml-1 w-3 h-3 animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :string, default: nil + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + time: 300, + transition: + {"transition-all transform ease-out duration-300", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all transform ease-in duration-200", + "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end + + def show_modal(js \\ %JS{}, id) when is_binary(id) do + js + |> JS.show(to: "##{id}") + |> JS.show( + to: "##{id}-bg", + time: 300, + transition: {"transition-all transform ease-out duration-300", "opacity-0", "opacity-100"} + ) + |> show("##{id}-container") + |> JS.add_class("overflow-hidden", to: "body") + |> JS.focus_first(to: "##{id}-content") + end + + def hide_modal(js \\ %JS{}, id) do + js + |> JS.hide( + to: "##{id}-bg", + transition: {"transition-all transform ease-in duration-200", "opacity-100", "opacity-0"} + ) + |> hide("##{id}-container") + |> JS.hide(to: "##{id}", transition: {"block", "block", "hidden"}) + |> JS.remove_class("overflow-hidden", to: "body") + |> JS.pop_focus() + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). + if count = opts[:count] do + Gettext.dngettext(DeployPhoenixSqliteWeb.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(DeployPhoenixSqliteWeb.Gettext, "errors", msg, opts) + end + end + + @doc """ + Translates the errors for a field from a keyword list of errors. 
+ """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts.ex new file mode 100644 index 0000000000..14e9c9e24c --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts.ex @@ -0,0 +1,14 @@ +defmodule DeployPhoenixSqliteWeb.Layouts do + @moduledoc """ + This module holds different layouts used by your application. + + See the `layouts` directory for all templates available. + The "root" layout is a skeleton rendered as part of the + application router. The "app" layout is set as the default + layout on both `use DeployPhoenixSqliteWeb, :controller` and + `use DeployPhoenixSqliteWeb, :live_view`. + """ + use DeployPhoenixSqliteWeb, :html + + embed_templates "layouts/*" +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/app.html.heex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/app.html.heex new file mode 100644 index 0000000000..e23bfc81c4 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/app.html.heex @@ -0,0 +1,32 @@ +
+<header class="px-4 sm:px-6 lg:px-8">
+  <div class="flex items-center justify-between border-b border-zinc-100 py-3 text-sm">
+    <div class="flex items-center gap-4">
+      <a href="/">
+        <img src={~p"/images/logo.svg"} width="36" />
+      </a>
+      <p class="bg-brand/5 text-brand rounded-full px-2 font-medium leading-6">
+        v<%= Application.spec(:phoenix, :vsn) %>
+      </p>
+    </div>
+    <div class="flex items-center gap-4 font-semibold leading-6 text-zinc-900">
+      <a href="https://twitter.com/elixirphoenix" class="hover:text-zinc-700">
+        @elixirphoenix
+      </a>
+      <a href="https://github.com/phoenixframework/phoenix" class="hover:text-zinc-700">
+        GitHub
+      </a>
+      <a
+        href="https://hexdocs.pm/phoenix/overview.html"
+        class="rounded-lg bg-zinc-100 px-2 py-1 hover:bg-zinc-200/80"
+      >
+        Get Started <span aria-hidden="true">&rarr;</span>
+      </a>
+    </div>
+  </div>
+</header>
+<main class="px-4 py-20 sm:px-6 lg:px-8">
+  <div class="mx-auto max-w-2xl">
+    <.flash_group flash={@flash} />
+    <%= @inner_content %>
+  </div>
+</main>
diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/root.html.heex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/root.html.heex new file mode 100644 index 0000000000..fef5e9296e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/components/layouts/root.html.heex @@ -0,0 +1,17 @@ + + + + + + + <.live_title suffix=" · Phoenix Framework"> + <%= assigns[:page_title] || "DeployPhoenixSqlite" %> + + + + + + <%= @inner_content %> + + diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_html.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_html.ex new file mode 100644 index 0000000000..838aa72037 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_html.ex @@ -0,0 +1,24 @@ +defmodule DeployPhoenixSqliteWeb.ErrorHTML do + @moduledoc """ + This module is invoked by your endpoint in case of errors on HTML requests. + + See config/config.exs. + """ + use DeployPhoenixSqliteWeb, :html + + # If you want to customize your error pages, + # uncomment the embed_templates/1 call below + # and add pages to the error directory: + # + # * lib/deploy_phoenix_sqlite_web/controllers/error_html/404.html.heex + # * lib/deploy_phoenix_sqlite_web/controllers/error_html/500.html.heex + # + # embed_templates "error_html/*" + + # The default is to render a plain text page based on + # the template name. For example, "404.html" becomes + # "Not Found". + def render(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_json.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_json.ex new file mode 100644 index 0000000000..ffd19c1389 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/error_json.ex @@ -0,0 +1,21 @@ +defmodule DeployPhoenixSqliteWeb.ErrorJSON do + @moduledoc """ + This module is invoked by your endpoint in case of errors on JSON requests. + + See config/config.exs. + """ + + # If you want to customize a particular status code, + # you may add your own clauses, such as: + # + # def render("500.json", _assigns) do + # %{errors: %{detail: "Internal Server Error"}} + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.json" becomes + # "Not Found". + def render(template, _assigns) do + %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}} + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_controller.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_controller.ex new file mode 100644 index 0000000000..fdbfcbfadb --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_controller.ex @@ -0,0 +1,9 @@ +defmodule DeployPhoenixSqliteWeb.PageController do + use DeployPhoenixSqliteWeb, :controller + + def home(conn, _params) do + # The home page is often custom made, + # so skip the default app layout. 
+ render(conn, :home, layout: false) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html.ex new file mode 100644 index 0000000000..181ff31e21 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html.ex @@ -0,0 +1,10 @@ +defmodule DeployPhoenixSqliteWeb.PageHTML do + @moduledoc """ + This module contains pages rendered by PageController. + + See the `page_html` directory for all templates available. + """ + use DeployPhoenixSqliteWeb, :html + + embed_templates "page_html/*" +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html/home.html.heex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html/home.html.heex new file mode 100644 index 0000000000..dc1820b11e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/controllers/page_html/home.html.heex @@ -0,0 +1,222 @@ +<.flash_group flash={@flash} /> + +
+
+ +

+ Phoenix Framework + + v<%= Application.spec(:phoenix, :vsn) %> + +

+

+ Peace of mind from prototype to production. +

+

+ Build rich, interactive web applications quickly, with less code and fewer moving parts. Join our growing community of developers using Phoenix to craft APIs, HTML5 apps and more, for fun or at scale. +

+ +
+
diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/endpoint.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/endpoint.ex new file mode 100644 index 0000000000..6ebc571fba --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/endpoint.ex @@ -0,0 +1,53 @@ +defmodule DeployPhoenixSqliteWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :deploy_phoenix_sqlite + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_deploy_phoenix_sqlite_key", + signing_salt: "+EvAIZj3", + same_site: "Lax" + ] + + socket "/live", Phoenix.LiveView.Socket, + websocket: [connect_info: [session: @session_options]], + longpoll: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # You should set gzip to true if you are running phx.digest + # when deploying your static files in production. + plug Plug.Static, + at: "/", + from: :deploy_phoenix_sqlite, + gzip: false, + only: DeployPhoenixSqliteWeb.static_paths() + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + plug Phoenix.Ecto.CheckRepoStatus, otp_app: :deploy_phoenix_sqlite + end + + plug Phoenix.LiveDashboard.RequestLogger, + param_key: "request_logger", + cookie_key: "request_logger" + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug DeployPhoenixSqliteWeb.Router +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/gettext.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/gettext.ex new file mode 100644 index 0000000000..d50c1c97e8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/gettext.ex @@ -0,0 +1,24 @@ +defmodule DeployPhoenixSqliteWeb.Gettext do + @moduledoc """ + A module providing Internationalization with a gettext-based API. + + By using [Gettext](https://hexdocs.pm/gettext), + your module gains a set of macros for translations, for example: + + import DeployPhoenixSqliteWeb.Gettext + + # Simple translation + gettext("Here is the string to translate") + + # Plural translation + ngettext("Here is the string to translate", + "Here are the strings to translate", + 3) + + # Domain-based translation + dgettext("errors", "Here is the error message to translate") + + See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. 
+ """ + use Gettext, otp_app: :deploy_phoenix_sqlite +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/router.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/router.ex new file mode 100644 index 0000000000..1af67818c7 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/router.ex @@ -0,0 +1,44 @@ +defmodule DeployPhoenixSqliteWeb.Router do + use DeployPhoenixSqliteWeb, :router + + pipeline :browser do + plug :accepts, ["html"] + plug :fetch_session + plug :fetch_live_flash + plug :put_root_layout, html: {DeployPhoenixSqliteWeb.Layouts, :root} + plug :protect_from_forgery + plug :put_secure_browser_headers + end + + pipeline :api do + plug :accepts, ["json"] + end + + scope "/", DeployPhoenixSqliteWeb do + pipe_through :browser + + get "/", PageController, :home + end + + # Other scopes may use custom stacks. + # scope "/api", DeployPhoenixSqliteWeb do + # pipe_through :api + # end + + # Enable LiveDashboard and Swoosh mailbox preview in development + if Application.compile_env(:deploy_phoenix_sqlite, :dev_routes) do + # If you want to use the LiveDashboard in production, you should put + # it behind authentication and allow only admins to access it. + # If your application does not have an admins-only section yet, + # you can use Plug.BasicAuth to set up some basic authentication + # as long as you are also using SSL (which you should anyway). + import Phoenix.LiveDashboard.Router + + scope "/dev" do + pipe_through :browser + + live_dashboard "/dashboard", metrics: DeployPhoenixSqliteWeb.Telemetry + forward "/mailbox", Plug.Swoosh.MailboxPreview + end + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/telemetry.ex b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/telemetry.ex new file mode 100644 index 0000000000..019408039e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/lib/deploy_phoenix_sqlite_web/telemetry.ex @@ -0,0 +1,92 @@ +defmodule DeployPhoenixSqliteWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. 
+ # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.start.system_time", + unit: {:native, :millisecond} + ), + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.start.system_time", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.exception.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.socket_connected.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_joined.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_handled_in.duration", + tags: [:event], + unit: {:native, :millisecond} + ), + + # Database Metrics + summary("deploy_phoenix_sqlite.repo.query.total_time", + unit: {:native, :millisecond}, + description: "The sum of the other measurements" + ), + summary("deploy_phoenix_sqlite.repo.query.decode_time", + unit: {:native, :millisecond}, + description: "The time spent decoding the data received from the database" + ), + summary("deploy_phoenix_sqlite.repo.query.query_time", + unit: {:native, :millisecond}, + description: "The time spent executing the query" + ), + summary("deploy_phoenix_sqlite.repo.query.queue_time", + unit: {:native, :millisecond}, + description: "The time spent waiting for a database connection" + ), + summary("deploy_phoenix_sqlite.repo.query.idle_time", + unit: {:native, :millisecond}, + description: + "The time the connection spent waiting before being checked out for the query" + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. + # {DeployPhoenixSqliteWeb, :count_users, []} + ] + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/mix.exs b/test/fixtures/deploy-phoenix-sqlite/mix.exs new file mode 100644 index 0000000000..9bc2bc550f --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/mix.exs @@ -0,0 +1,85 @@ +defmodule DeployPhoenixSqlite.MixProject do + use Mix.Project + + def project do + [ + app: :deploy_phoenix_sqlite, + version: "0.1.0", + elixir: "~> 1.14", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + aliases: aliases(), + deps: deps() + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {DeployPhoenixSqlite.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. 
+ defp deps do + [ + {:phoenix, "~> 1.7.17"}, + {:phoenix_ecto, "~> 4.5"}, + {:ecto_sql, "~> 3.10"}, + {:ecto_sqlite3, ">= 0.0.0"}, + {:phoenix_html, "~> 4.1"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 1.0.0"}, + {:floki, ">= 0.30.0", only: :test}, + {:phoenix_live_dashboard, "~> 0.8.3"}, + {:esbuild, "~> 0.8", runtime: Mix.env() == :dev}, + {:tailwind, "~> 0.2", runtime: Mix.env() == :dev}, + {:heroicons, + github: "tailwindlabs/heroicons", + tag: "v2.1.1", + sparse: "optimized", + app: false, + compile: false, + depth: 1}, + {:swoosh, "~> 1.5"}, + {:finch, "~> 0.13"}, + {:telemetry_metrics, "~> 1.0"}, + {:telemetry_poller, "~> 1.0"}, + {:gettext, "~> 0.20"}, + {:jason, "~> 1.2"}, + {:dns_cluster, "~> 0.1.1"}, + {:bandit, "~> 1.5"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. + defp aliases do + [ + setup: ["deps.get", "ecto.setup", "assets.setup", "assets.build"], + "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], + "ecto.reset": ["ecto.drop", "ecto.setup"], + test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"], + "assets.setup": ["tailwind.install --if-missing", "esbuild.install --if-missing"], + "assets.build": ["tailwind deploy_phoenix_sqlite", "esbuild deploy_phoenix_sqlite"], + "assets.deploy": [ + "tailwind deploy_phoenix_sqlite --minify", + "esbuild deploy_phoenix_sqlite --minify", + "phx.digest" + ] + ] + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/mix.lock b/test/fixtures/deploy-phoenix-sqlite/mix.lock new file mode 100644 index 0000000000..a36ae83355 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/mix.lock @@ -0,0 +1,44 @@ +%{ + "bandit": {:hex, :bandit, "1.6.1", "9e01b93d72ddc21d8c576a704949e86ee6cde7d11270a1d3073787876527a48f", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "5a904bf010ea24b67979835e0507688e31ac873d4ffc8ed0e5413e8d77455031"}, + "castore": {:hex, :castore, "1.0.10", "43bbeeac820f16c89f79721af1b3e092399b3a1ecc8df1a472738fd853574911", [:mix], [], "hexpm", "1b0b7ea14d889d9ea21202c43a4fa015eb913021cb535e8ed91946f4b77a8848"}, + "cc_precompiler": {:hex, :cc_precompiler, "0.1.10", "47c9c08d8869cf09b41da36538f62bc1abd3e19e41701c2cea2675b53c704258", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "f6e046254e53cd6b41c6bacd70ae728011aa82b2742a80d6e2214855c6e06b22"}, + "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, + "decimal": {:hex, :decimal, "2.2.0", "df3d06bb9517e302b1bd265c1e7f16cda51547ad9d99892049340841f3e15836", [:mix], [], "hexpm", "af8daf87384b51b7e611fb1a1f2c4d4876b65ef968fa8bd3adf44cff401c7f21"}, + "dns_cluster": {:hex, :dns_cluster, "0.1.3", "0bc20a2c88ed6cc494f2964075c359f8c2d00e1bf25518a6a6c7fd277c9b0c66", [:mix], [], "hexpm", 
"46cb7c4a1b3e52c7ad4cbe33ca5079fbde4840dedeafca2baf77996c2da1bc33"}, + "ecto": {:hex, :ecto, "3.12.5", "4a312960ce612e17337e7cefcf9be45b95a3be6b36b6f94dfb3d8c361d631866", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "6eb18e80bef8bb57e17f5a7f068a1719fbda384d40fc37acb8eb8aeca493b6ea"}, + "ecto_sql": {:hex, :ecto_sql, "3.12.1", "c0d0d60e85d9ff4631f12bafa454bc392ce8b9ec83531a412c12a0d415a3a4d0", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aff5b958a899762c5f09028c847569f7dfb9cc9d63bdb8133bff8a5546de6bf5"}, + "ecto_sqlite3": {:hex, :ecto_sqlite3, "0.17.5", "fbee5c17ff6afd8e9ded519b0abb363926c65d30b27577232bb066b2a79957b8", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.12", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:exqlite, "~> 0.22", [hex: :exqlite, repo: "hexpm", optional: false]}], "hexpm", "3b54734d998cbd032ac59403c36acf4e019670e8b6ceef9c6c33d8986c4e9704"}, + "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, + "esbuild": {:hex, :esbuild, "0.8.2", "5f379dfa383ef482b738e7771daf238b2d1cfb0222bef9d3b20d4c8f06c7a7ac", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "558a8a08ed78eb820efbfda1de196569d8bfa9b51e8371a1934fbb31345feda7"}, + "expo": {:hex, :expo, "1.1.0", "f7b9ed7fb5745ebe1eeedf3d6f29226c5dd52897ac67c0f8af62a07e661e5c75", [:mix], [], "hexpm", "fbadf93f4700fb44c331362177bdca9eeb8097e8b0ef525c9cc501cb9917c960"}, + "exqlite": {:hex, :exqlite, "0.27.1", "73fc0b3dc3b058a77a2b3771f82a6af2ddcf370b069906968a34083d2ffd2884", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.8", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "79ef5756451cfb022e8013e1ed00d0f8f7d1333c19502c394dc16b15cfb4e9b4"}, + "file_system": {:hex, :file_system, "1.0.1", "79e8ceaddb0416f8b8cd02a0127bdbababe7bf4a23d2a395b983c1f8b3f73edd", [:mix], [], "hexpm", "4414d1f38863ddf9120720cd976fce5bdde8e91d8283353f0e31850fa89feb9e"}, + "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, 
"~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"}, + "floki": {:hex, :floki, "0.37.0", "b83e0280bbc6372f2a403b2848013650b16640cd2470aea6701f0632223d719e", [:mix], [], "hexpm", "516a0c15a69f78c47dc8e0b9b3724b29608aa6619379f91b1ffa47109b5d0dd3"}, + "gettext": {:hex, :gettext, "0.26.2", "5978aa7b21fada6deabf1f6341ddba50bc69c999e812211903b169799208f2a8", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "aa978504bcf76511efdc22d580ba08e2279caab1066b76bb9aa81c4a1e0a32a5"}, + "heroicons": {:git, "https://github.com/tailwindlabs/heroicons.git", "88ab3a0d790e6a47404cba02800a6b25d2afae50", [tag: "v2.1.1", sparse: "optimized"]}, + "hpax": {:hex, :hpax, "1.0.1", "c857057f89e8bd71d97d9042e009df2a42705d6d690d54eca84c8b29af0787b0", [:mix], [], "hexpm", "4e2d5a4f76ae1e3048f35ae7adb1641c36265510a2d4638157fbcb53dda38445"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, + "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"}, + "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, + "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, + "phoenix": {:hex, :phoenix, "1.7.17", "2fcdceecc6fb90bec26fab008f96abbd0fd93bc9956ec7985e5892cf545152ca", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "50e8ad537f3f7b0efb1509b2f75b5c918f697be6a45d48e49a30d3b7c0e464c9"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.3", "f686701b0499a07f2e3b122d84d52ff8a31f5def386e03706c916f6feddf69ef", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: 
false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "909502956916a657a197f94cc1206d9a65247538de8a5e186f7537c895d95764"}, + "phoenix_html": {:hex, :phoenix_html, "4.1.1", "4c064fd3873d12ebb1388425a8f2a19348cef56e7289e1998e2d2fa758aa982e", [:mix], [], "hexpm", "f2f2df5a72bc9a2f510b21497fd7d2b86d932ec0598f0210fed4114adc546c6f"}, + "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.5", "d5f44d7dbd7cfacaa617b70c5a14b2b598d6f93b9caa8e350c51d56cd4350a9b", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "1d73920515554d7d6c548aee0bf10a4780568b029d042eccb336db29ea0dad70"}, + "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.5.3", "f2161c207fda0e4fb55165f650f7f8db23f02b29e3bff00ff7ef161d6ac1f09d", [:mix], [{:file_system, "~> 0.3 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b4ec9cd73cb01ff1bd1cac92e045d13e7030330b74164297d1aee3907b54803c"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "1.0.0", "3a10dfce8f87b2ad4dc65de0732fc2a11e670b2779a19e8d3281f4619a85bce4", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "254caef0028765965ca6bd104cc7d68dcc7d57cc42912bef92f6b03047251d99"}, + "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, + "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, + "plug": {:hex, :plug, "1.16.1", "40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"}, + "plug_crypto": {:hex, :plug_crypto, "2.1.0", 
"f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"}, + "swoosh": {:hex, :swoosh, "1.17.3", "5cda7bff6bc1121cc5b58db8ed90ef33261b373425ae3e32dd599688037a0482", [:mix], [{:bandit, ">= 1.0.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:cowboy, "~> 1.1 or ~> 2.4", [hex: :cowboy, repo: "hexpm", optional: true]}, {:ex_aws, "~> 2.1", [hex: :ex_aws, repo: "hexpm", optional: true]}, {:finch, "~> 0.6", [hex: :finch, repo: "hexpm", optional: true]}, {:gen_smtp, "~> 0.13 or ~> 1.0", [hex: :gen_smtp, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mail, "~> 0.2", [hex: :mail, repo: "hexpm", optional: true]}, {:mime, "~> 1.1 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mua, "~> 0.2.3", [hex: :mua, repo: "hexpm", optional: true]}, {:multipart, "~> 0.4", [hex: :multipart, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: true]}, {:plug_cowboy, ">= 1.0.0", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:req, "~> 0.5 or ~> 1.0", [hex: :req, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "14ad57cfbb70af57323e17f569f5840a33c01f8ebc531dd3846beef3c9c95e55"}, + "tailwind": {:hex, :tailwind, "0.2.4", "5706ec47182d4e7045901302bf3a333e80f3d1af65c442ba9a9eed152fb26c2e", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c6e4a82b8727bab593700c998a4d98cf3d8025678bfde059aed71d0000c3e463"}, + "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "1.0.0", "29f5f84991ca98b8eb02fc208b2e6de7c95f8bb2294ef244a176675adc7775df", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f23713b3847286a534e005126d4c959ebcca68ae9582118ce436b521d1d47d5d"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"}, + "thousand_island": {:hex, :thousand_island, "1.3.7", "1da7598c0f4f5f50562c097a3f8af308ded48cd35139f0e6f17d9443e4d0c9c5", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "0139335079953de41d381a6134d8b618d53d084f558c734f2662d1a72818dd12"}, + "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, + "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"}, +} diff --git 
a/test/fixtures/deploy-phoenix-sqlite/priv/gettext/en/LC_MESSAGES/errors.po b/test/fixtures/deploy-phoenix-sqlite/priv/gettext/en/LC_MESSAGES/errors.po new file mode 100644 index 0000000000..844c4f5cea --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/gettext/en/LC_MESSAGES/errors.po @@ -0,0 +1,112 @@ +## `msgid`s in this file come from POT (.pot) files. +## +## Do not add, change, or remove `msgid`s manually here as +## they're tied to the ones in the corresponding POT file +## (with the same domain). +## +## Use `mix gettext.extract --merge` or `mix gettext.merge` +## to merge POT files into PO files. +msgid "" +msgstr "" +"Language: en\n" + +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/gettext/errors.pot b/test/fixtures/deploy-phoenix-sqlite/priv/gettext/errors.pot new file mode 100644 index 0000000000..eef2de2ba4 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/gettext/errors.pot @@ -0,0 +1,109 @@ +## This is a PO Template file. +## +## `msgid`s here are often extracted from source code. +## Add new translations manually only if they're dynamic +## translations that can't be statically extracted. +## +## Run `mix gettext.extract` to bring this file up to +## date. 
Leave `msgstr`s empty as changing them here has no +## effect: edit them in PO (`.po`) files instead. +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/repo/migrations/.formatter.exs b/test/fixtures/deploy-phoenix-sqlite/priv/repo/migrations/.formatter.exs new file mode 100644 index 0000000000..49f9151ed2 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/repo/migrations/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto_sql], + inputs: ["*.exs"] +] diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/repo/seeds.exs b/test/fixtures/deploy-phoenix-sqlite/priv/repo/seeds.exs new file mode 100644 index 0000000000..ebeccbf3c7 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/repo/seeds.exs @@ -0,0 +1,11 @@ +# Script for populating the database. You can run it as: +# +# mix run priv/repo/seeds.exs +# +# Inside the script, you can read and write to any of your +# repositories directly: +# +# DeployPhoenixSqlite.Repo.insert!(%DeployPhoenixSqlite.SomeSchema{}) +# +# We recommend using the bang functions (`insert!`, `update!` +# and so on) as they will fail if something goes wrong. 
diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/static/favicon.ico b/test/fixtures/deploy-phoenix-sqlite/priv/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..7f372bfc21cdd8cb47585339d5fa4d9dd424402f GIT binary patch literal 152 zcmeAS@N?(olHy`uVBq!ia0vp^4j|0I1|(Ny7TyC=@t!V@Ar*{oFEH`~d50E!_s``s q?{G*w(7?#d#v@^nKnY_HKaYb01EZMZjMqTJ89ZJ6T-G@yGywoKK_h|y literal 0 HcmV?d00001 diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/static/images/logo.svg b/test/fixtures/deploy-phoenix-sqlite/priv/static/images/logo.svg new file mode 100644 index 0000000000..9f26babac2 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/static/images/logo.svg @@ -0,0 +1,6 @@ + diff --git a/test/fixtures/deploy-phoenix-sqlite/priv/static/robots.txt b/test/fixtures/deploy-phoenix-sqlite/priv/static/robots.txt new file mode 100644 index 0000000000..26e06b5f19 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/priv/static/robots.txt @@ -0,0 +1,5 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the next two lines: +# User-agent: * +# Disallow: / diff --git a/test/fixtures/deploy-phoenix-sqlite/rel/env.sh.eex b/test/fixtures/deploy-phoenix-sqlite/rel/env.sh.eex new file mode 100755 index 0000000000..5b24b1e55a --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/rel/env.sh.eex @@ -0,0 +1,13 @@ +#!/bin/sh + +# configure node for distributed erlang with IPV6 support +export ERL_AFLAGS="-proto_dist inet6_tcp" +export ECTO_IPV6="true" +export DNS_CLUSTER_QUERY="${FLY_APP_NAME}.internal" +export RELEASE_DISTRIBUTION="name" +export RELEASE_NODE="${FLY_APP_NAME}-${FLY_IMAGE_REF##*[:'-']}@${FLY_PRIVATE_IP}" + +# Uncomment to send crash dumps to stderr +# This can be useful for debugging, but may log sensitive information +# export ERL_CRASH_DUMP=/dev/stderr +# export ERL_CRASH_DUMP_BYTES=4096 diff --git a/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate new file mode 100755 index 0000000000..050220a66e --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate @@ -0,0 +1,5 @@ +#!/bin/sh +set -eu + +cd -P -- "$(dirname -- "$0")" +exec ./deploy_phoenix_sqlite eval DeployPhoenixSqlite.Release.migrate diff --git a/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate.bat b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate.bat new file mode 100755 index 0000000000..71cd06aa73 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/migrate.bat @@ -0,0 +1 @@ +call "%~dp0\deploy_phoenix_sqlite" eval DeployPhoenixSqlite.Release.migrate diff --git a/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server new file mode 100755 index 0000000000..15f35a72c6 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server @@ -0,0 +1,5 @@ +#!/bin/sh +set -eu + +cd -P -- "$(dirname -- "$0")" +PHX_SERVER=true exec ./deploy_phoenix_sqlite start diff --git a/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server.bat b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server.bat new file mode 100755 index 0000000000..90cb73f3f1 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/rel/overlays/bin/server.bat @@ -0,0 +1,2 @@ +set PHX_SERVER=true +call "%~dp0\deploy_phoenix_sqlite" start diff --git 
a/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_html_test.exs b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_html_test.exs new file mode 100644 index 0000000000..34f6e02e46 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_html_test.exs @@ -0,0 +1,14 @@ +defmodule DeployPhoenixSqliteWeb.ErrorHTMLTest do + use DeployPhoenixSqliteWeb.ConnCase, async: true + + # Bring render_to_string/4 for testing custom views + import Phoenix.Template + + test "renders 404.html" do + assert render_to_string(DeployPhoenixSqliteWeb.ErrorHTML, "404", "html", []) == "Not Found" + end + + test "renders 500.html" do + assert render_to_string(DeployPhoenixSqliteWeb.ErrorHTML, "500", "html", []) == "Internal Server Error" + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_json_test.exs b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_json_test.exs new file mode 100644 index 0000000000..8751894711 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/error_json_test.exs @@ -0,0 +1,12 @@ +defmodule DeployPhoenixSqliteWeb.ErrorJSONTest do + use DeployPhoenixSqliteWeb.ConnCase, async: true + + test "renders 404" do + assert DeployPhoenixSqliteWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{detail: "Not Found"}} + end + + test "renders 500" do + assert DeployPhoenixSqliteWeb.ErrorJSON.render("500.json", %{}) == + %{errors: %{detail: "Internal Server Error"}} + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/page_controller_test.exs b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/page_controller_test.exs new file mode 100644 index 0000000000..f0259ab5a8 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/deploy_phoenix_sqlite_web/controllers/page_controller_test.exs @@ -0,0 +1,8 @@ +defmodule DeployPhoenixSqliteWeb.PageControllerTest do + use DeployPhoenixSqliteWeb.ConnCase + + test "GET /", %{conn: conn} do + conn = get(conn, ~p"/") + assert html_response(conn, 200) =~ "Peace of mind from prototype to production" + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/test/support/conn_case.ex b/test/fixtures/deploy-phoenix-sqlite/test/support/conn_case.ex new file mode 100644 index 0000000000..fafa4889fe --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/support/conn_case.ex @@ -0,0 +1,38 @@ +defmodule DeployPhoenixSqliteWeb.ConnCase do + @moduledoc """ + This module defines the test case to be used by + tests that require setting up a connection. + + Such tests rely on `Phoenix.ConnTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use DeployPhoenixSqliteWeb.ConnCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # The default endpoint for testing + @endpoint DeployPhoenixSqliteWeb.Endpoint + + use DeployPhoenixSqliteWeb, :verified_routes + + # Import conveniences for testing with connections + import Plug.Conn + import Phoenix.ConnTest + import DeployPhoenixSqliteWeb.ConnCase + end + end + + setup tags do + DeployPhoenixSqlite.DataCase.setup_sandbox(tags) + {:ok, conn: Phoenix.ConnTest.build_conn()} + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/test/support/data_case.ex b/test/fixtures/deploy-phoenix-sqlite/test/support/data_case.ex new file mode 100644 index 0000000000..898edfa4d9 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/support/data_case.ex @@ -0,0 +1,58 @@ +defmodule DeployPhoenixSqlite.DataCase do + @moduledoc """ + This module defines the setup for tests requiring + access to the application's data layer. + + You may define functions here to be used as helpers in + your tests. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use DeployPhoenixSqlite.DataCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + alias DeployPhoenixSqlite.Repo + + import Ecto + import Ecto.Changeset + import Ecto.Query + import DeployPhoenixSqlite.DataCase + end + end + + setup tags do + DeployPhoenixSqlite.DataCase.setup_sandbox(tags) + :ok + end + + @doc """ + Sets up the sandbox based on the test tags. + """ + def setup_sandbox(tags) do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(DeployPhoenixSqlite.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + end + + @doc """ + A helper that transforms changeset errors into a map of messages. 
+ + assert {:error, changeset} = Accounts.create_user(%{password: "short"}) + assert "password is too short" in errors_on(changeset).password + assert %{password: ["password is too short"]} = errors_on(changeset) + + """ + def errors_on(changeset) do + Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> + Regex.replace(~r"%{(\w+)}", message, fn _, key -> + opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + end) + end) + end +end diff --git a/test/fixtures/deploy-phoenix-sqlite/test/test_helper.exs b/test/fixtures/deploy-phoenix-sqlite/test/test_helper.exs new file mode 100644 index 0000000000..e01419d2c4 --- /dev/null +++ b/test/fixtures/deploy-phoenix-sqlite/test/test_helper.exs @@ -0,0 +1,2 @@ +ExUnit.start() +Ecto.Adapters.SQL.Sandbox.mode(DeployPhoenixSqlite.Repo, :manual) From 3a289134d844a37dfab59df4f2ea4dbdcab1bd3d Mon Sep 17 00:00:00 2001 From: Christopher Louvet Date: Tue, 18 Mar 2025 09:00:41 +0700 Subject: [PATCH 097/104] Port https://github.com/superfly/flyctl/pull/4247 into deployer branch (#4253) --- internal/config/config.go | 1 + internal/metrics/db.go | 68 +++++++++++++++++++++++++++++---------- 2 files changed, 52 insertions(+), 17 deletions(-) diff --git a/internal/config/config.go b/internal/config/config.go index 04583ca574..ef454f13f0 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -154,6 +154,7 @@ func (cfg *Config) applyEnv() { cfg.APIBaseURL = env.FirstOrDefault(cfg.APIBaseURL, apiBaseURLEnvKey) cfg.FlapsBaseURL = env.FirstOrDefault(cfg.FlapsBaseURL, flapsBaseURLEnvKey) cfg.MetricsBaseURL = env.FirstOrDefault(cfg.MetricsBaseURL, metricsBaseURLEnvKey) + cfg.MetricsToken = env.FirstOrDefault(cfg.MetricsToken, MetricsTokenEnvKey, AccessTokenEnvKey, APITokenEnvKey) cfg.SyntheticsBaseURL = env.FirstOrDefault(cfg.SyntheticsBaseURL, syntheticsBaseURLEnvKey) cfg.SendMetrics = env.IsTruthy(SendMetricsEnvKey) || cfg.SendMetrics cfg.SyntheticsAgent = env.IsTruthy(SyntheticsAgentEnvKey) || cfg.SyntheticsAgent diff --git a/internal/metrics/db.go b/internal/metrics/db.go index ed672fe366..c66b49e4f2 100644 --- a/internal/metrics/db.go +++ b/internal/metrics/db.go @@ -38,8 +38,6 @@ func FlushMetrics(ctx context.Context) error { iostream := iostreams.FromContext(ctx) - // On CI, always block on metrics send. This sucks, but the alternative is not getting metrics from CI at all. There are timeouts in place to prevent this from taking more than 15 seconds - if iostream.IsInteractive() { flyctl, err := os.Executable() if err != nil { @@ -70,39 +68,75 @@ func FlushMetrics(ctx context.Context) error { return err } } else { + // Don't check for errors in non-interactive mode + // because we don't want to impact other operations + // if metrics sending fails. 
SendMetrics(ctx, string(json)) } return nil } -// / Spens up to 15 seconds sending all metrics collected so far to flyctl-metrics post endpoint -func SendMetrics(ctx context.Context, json string) error { - authToken, err := GetMetricsToken(ctx) +func SendMetrics(ctx context.Context, jsonData string) error { + cfg := config.FromContext(ctx) + metricsToken, err := GetMetricsToken(ctx) if err != nil { - return err + fmt.Fprintf(os.Stderr, "Warning: Metrics token unavailable: %v\n", err) + return nil } - cfg := config.FromContext(ctx) - request, err := http.NewRequest("POST", cfg.MetricsBaseURL+"/metrics_post", bytes.NewBuffer([]byte(json))) + baseURL := cfg.MetricsBaseURL + endpoint := baseURL + "/metrics_post" + userAgent := fmt.Sprintf("flyctl/%s", buildinfo.Info().Version) + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + + err = sendMetricsRequest(timeoutCtx, endpoint, metricsToken, userAgent, []byte(jsonData)) if err != nil { + fmt.Fprintf(os.Stderr, "Warning: Metrics send issue: %v\n", err) return err } - request.Header.Set("Authorization", authToken) - request.Header.Set("User-Agent", fmt.Sprintf("flyctl/%s", buildinfo.Info().Version)) - - retryTransport := rehttp.NewTransport(http.DefaultTransport, rehttp.RetryAll(rehttp.RetryMaxRetries(3), rehttp.RetryTimeoutErr()), rehttp.ConstDelay(0)) + return nil +} - client := http.Client{ - Transport: retryTransport, - Timeout: time.Second * 5, +func sendMetricsRequest(ctx context.Context, endpoint, token, userAgent string, data []byte) error { + request, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewBuffer(data)) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) } + request.Header.Set("Authorization", "Bearer "+token) + request.Header.Set("User-Agent", userAgent) + + client := createHTTPClient() + resp, err := client.Do(request) if err != nil { - return err + return fmt.Errorf("failed to send metrics: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("metrics send failed with status %d", resp.StatusCode) } - return resp.Body.Close() + return nil +} + +func createHTTPClient() *http.Client { + retryTransport := rehttp.NewTransport( + http.DefaultTransport, + rehttp.RetryAll( + rehttp.RetryMaxRetries(3), + rehttp.RetryTimeoutErr(), + ), + rehttp.ConstDelay(0), + ) + + return &http.Client{ + Transport: retryTransport, + Timeout: time.Second * 5, + } } From bcffc53016860c25d830760eec83bb0484889c5a Mon Sep 17 00:00:00 2001 From: Lubien Date: Wed, 9 Jul 2025 15:53:46 -0300 Subject: [PATCH 098/104] Deploy image ref (#4471) * add DEPLOY_IMAGE_REF support * fix syntax * Do not build if DEPLOY_IMAGE_REF * add custom command and skip build support * ignore App not found errors on preflight cleanup * do not try flyctl config show --local on CUSTOM_COMMAND * fix exit --- deploy.rb | 57 +++++++++++++++++++++++--------- scripts/delete_preflight_apps.sh | 2 +- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/deploy.rb b/deploy.rb index 3b6c4d7b4d..5baa679103 100755 --- a/deploy.rb +++ b/deploy.rb @@ -24,6 +24,12 @@ CREATE_AND_PUSH_BRANCH = !get_env("DEPLOY_CREATE_AND_PUSH_BRANCH").nil? FLYIO_BRANCH_NAME = "flyio-new-files" +CUSTOM_COMMAND = get_env("CUSTOM_COMMAND") +DEPLOY_IMAGE_REF = get_env("DEPLOY_IMAGE_REF") +SKIP_BUILD = !get_env("SKIP_BUILD").nil? + +DO_SKIP_BUILD = SKIP_BUILD || !CUSTOM_COMMAND.nil? || !DEPLOY_IMAGE_REF.nil? 
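The three settings added above feed a single decision: DO_SKIP_BUILD suppresses the separate image build whenever a prebuilt image (DEPLOY_IMAGE_REF), a custom deploy command (CUSTOM_COMMAND), or an explicit SKIP_BUILD is supplied. A minimal Ruby sketch of that resolution logic, separate from the patch itself and using a made-up image reference, might look like:

    # Sketch only: mirrors how DO_SKIP_BUILD and DEPLOY_IMAGE_REF are meant to interact.
    def resolve_image_ref(env, built_image_ref = nil)
      deploy_image_ref = env["DEPLOY_IMAGE_REF"]
      custom_command   = env["CUSTOM_COMMAND"]
      skip_build       = !env["SKIP_BUILD"].nil?

      do_skip_build = skip_build || !custom_command.nil? || !deploy_image_ref.nil?

      return deploy_image_ref unless deploy_image_ref.nil? # deploy a prebuilt image as-is
      return nil if do_skip_build                          # no build; the deploy step decides
      built_image_ref                                      # default path: build, push, then deploy
    end

    # Example with a made-up registry reference:
    resolve_image_ref({ "DEPLOY_IMAGE_REF" => "registry.fly.io/example-app:deployment-123" })
    # => "registry.fly.io/example-app:deployment-123"

When none of the three is set, the script falls through to the existing build-and-push path shown further down in this diff.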
+ DEPLOY_TRIGGER = get_env("DEPLOY_TRIGGER") DEPLOYER_FLY_CONFIG_PATH = get_env("DEPLOYER_FLY_CONFIG_PATH") DEPLOYER_SOURCE_CWD = get_env("DEPLOYER_SOURCE_CWD") @@ -74,7 +80,9 @@ steps.push({id: Step::CUSTOMIZE, description: "Customize deployment plan"}) if DEPLOY_CUSTOMIZE else # only deploying, so we need to send the artifacts right away - steps.push({id: Step::BUILD, description: "Build image"}) + if !DO_SKIP_BUILD + steps.push({id: Step::BUILD, description: "Build image"}) + end steps.push({id: Step::DEPLOY, description: "Deploy application"}) if DEPLOY_NOW artifact Artifact::META, { steps: steps } end @@ -83,7 +91,6 @@ in_step Step::GIT_PULL do ref = get_env("GIT_REF") artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } - exec_capture("git init", log: false) redacted_repo_url = GIT_REPO_URL.dup @@ -206,7 +213,7 @@ plugin = FLYCTL_TO_ASDF_PLUGIN_NAME.fetch(RUNTIME_LANGUAGE, RUNTIME_LANGUAGE) if plugin == "elixir" # required for elixir to work - exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") + exec_capture("asdf install erlang #{DEFAULT_ERLANG_VERSION}") end exec_capture("asdf install #{plugin} #{version}") else @@ -280,7 +287,9 @@ SENTRY = manifest.dig("plan", "sentry") == true steps.push({id: Step::GENERATE_BUILD_REQUIREMENTS, description: "Generate requirements for build"}) if DO_GEN_REQS - steps.push({id: Step::BUILD, description: "Build image"}) + if !DO_SKIP_BUILD + steps.push({id: Step::BUILD, description: "Build image"}) + end steps.push({id: Step::FLY_POSTGRES_CREATE, description: "Create and attach PostgreSQL database"}) if FLY_PG steps.push({id: Step::SUPABASE_POSTGRES, description: "Create Supabase PostgreSQL database"}) if SUPABASE steps.push({id: Step::UPSTASH_REDIS, description: "Create Upstash Redis database"}) if UPSTASH @@ -311,20 +320,32 @@ end # TODO: better error if missing config -fly_config = manifest && manifest.dig("config") || JSON.parse(exec_capture("flyctl config show --local #{CONFIG_COMMAND_STRING}", log: false)) +fly_config = manifest && manifest.dig("config") +if !fly_config && CUSTOM_COMMAND.nil? + fly_config = JSON.parse(exec_capture("flyctl config show --local #{CONFIG_COMMAND_STRING}", log: false)) +end + APP_NAME = DEPLOY_APP_NAME || fly_config["app"] -image_ref = in_step Step::BUILD do - image_tag = "deployment-#{SecureRandom.hex(16)}" - if (image_ref = fly_config.dig("build","image")&.strip) && !image_ref.nil? && !image_ref.empty? - info("Skipping build, using image defined in fly config: #{image_ref}") - image_ref +image_ref = if !DEPLOY_IMAGE_REF.nil? + DEPLOY_IMAGE_REF +else + if DO_SKIP_BUILD + nil else - image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" + in_step Step::BUILD do + image_tag = "deployment-#{SecureRandom.hex(16)}" + if (image_ref = fly_config.dig("build","image")&.strip) && !image_ref.nil? && !image_ref.empty? 
+ info("Skipping build, using image defined in fly config: #{image_ref}") + image_ref + else + image_ref = "registry.fly.io/#{APP_NAME}:#{image_tag}" - exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag} #{CONFIG_COMMAND_STRING}") - artifact Artifact::DOCKER_IMAGE, { ref: image_ref } - image_ref + exec_capture("flyctl deploy --build-only --push -a #{APP_NAME} --image-label #{image_tag} #{CONFIG_COMMAND_STRING}") + artifact Artifact::DOCKER_IMAGE, { ref: image_ref } + image_ref + end + end end end @@ -427,7 +448,11 @@ if DEPLOY_NOW in_step Step::DEPLOY do - exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref} --depot-scope=app #{CONFIG_COMMAND_STRING}") + if CUSTOM_COMMAND.nil? + exec_capture("flyctl deploy -a #{APP_NAME} --image #{image_ref} --depot-scope=app #{CONFIG_COMMAND_STRING}") + else + exec_capture(CUSTOM_COMMAND) + end end end @@ -458,4 +483,4 @@ $stderr.flush sleep 1.0 -end \ No newline at end of file +end diff --git a/scripts/delete_preflight_apps.sh b/scripts/delete_preflight_apps.sh index 5546d3c4a9..974f0704ca 100755 --- a/scripts/delete_preflight_apps.sh +++ b/scripts/delete_preflight_apps.sh @@ -13,5 +13,5 @@ do continue fi echo "Destroy $app" - flyctl apps destroy --yes "${app}" + flyctl apps destroy --yes "${app}" || true done From 178c73b6893b465cc126dd9d1d789871de4d6f5b Mon Sep 17 00:00:00 2001 From: Lubien Date: Tue, 7 Oct 2025 12:18:55 -0300 Subject: [PATCH 099/104] Deployer update now (#4593) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump the tracing group with 6 updates (#4263) Bumps the tracing group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.59.0` | `0.60.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.34.0` | `1.35.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.34.0` | `1.35.0` | | [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) | `1.34.0` | `1.35.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.34.0` | `1.35.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.34.0` | `1.35.0` | Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.59.0 to 0.60.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.59.0...zpages/v0.60.0) Updates `go.opentelemetry.io/otel` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...v1.35.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...v1.35.0) Updates 
`go.opentelemetry.io/otel/exporters/stdout/stdouttrace` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...v1.35.0) Updates `go.opentelemetry.io/otel/sdk` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...v1.35.0) Updates `go.opentelemetry.io/otel/trace` from 1.34.0 to 1.35.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...v1.35.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/stdout/stdouttrace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/sdk dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/trace dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/vektah/gqlparser/v2 from 2.5.22 to 2.5.23 (#4243) Bumps [github.com/vektah/gqlparser/v2](https://github.com/vektah/gqlparser) from 2.5.22 to 2.5.23. - [Release notes](https://github.com/vektah/gqlparser/releases) - [Commits](https://github.com/vektah/gqlparser/compare/v2.5.22...v2.5.23) --- updated-dependencies: - dependency-name: github.com/vektah/gqlparser/v2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/nats-io/nats.go from 1.39.0 to 1.39.1 (#4231) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.39.0 to 1.39.1. - [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.39.0...v1.39.1) --- updated-dependencies: - dependency-name: github.com/nats-io/nats.go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 (#4225) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.1 to 1.9.1. 
- [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.8.1...v1.9.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump rack from 2.2.11 to 2.2.13 in /test/preflight/fixtures/example-buildpack (#4248) Bump rack in /test/preflight/fixtures/example-buildpack Bumps [rack](https://github.com/rack/rack) from 2.2.11 to 2.2.13. - [Release notes](https://github.com/rack/rack/releases) - [Changelog](https://github.com/rack/rack/blob/main/CHANGELOG.md) - [Commits](https://github.com/rack/rack/compare/v2.2.11...v2.2.13) --- updated-dependencies: - dependency-name: rack dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/spf13/viper from 1.19.0 to 1.20.0 (#4268) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.19.0 to 1.20.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.19.0...v1.20.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/avast/retry-go/v4 from 4.6.0 to 4.6.1 (#4266) Bumps [github.com/avast/retry-go/v4](https://github.com/avast/retry-go) from 4.6.0 to 4.6.1. - [Release notes](https://github.com/avast/retry-go/releases) - [Commits](https://github.com/avast/retry-go/compare/4.6.0...4.6.1) --- updated-dependencies: - dependency-name: github.com/avast/retry-go/v4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/moby/buildkit from 0.20.0 to 0.20.2 (#4267) Bumps [github.com/moby/buildkit](https://github.com/moby/buildkit) from 0.20.0 to 0.20.2. - [Release notes](https://github.com/moby/buildkit/releases) - [Commits](https://github.com/moby/buildkit/compare/v0.20.0...v0.20.2) --- updated-dependencies: - dependency-name: github.com/moby/buildkit dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group with 3 updates (#4265) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.8 to 1.29.10 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.8...config/v1.29.10) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.61 to 1.17.63 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.61...credentials/v1.17.63) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.78.0 to 1.78.2 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.78.0...service/s3/v1.78.2) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/superfly/fly-go from 0.1.41 to 0.1.42 (#4271) Bumps [github.com/superfly/fly-go](https://github.com/superfly/fly-go) from 0.1.41 to 0.1.42. - [Release notes](https://github.com/superfly/fly-go/releases) - [Changelog](https://github.com/superfly/fly-go/blob/main/resource_releases.go) - [Commits](https://github.com/superfly/fly-go/compare/v0.1.41...v0.1.42) --- updated-dependencies: - dependency-name: github.com/superfly/fly-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/miekg/dns from 1.1.63 to 1.1.64 (#4270) Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.63 to 1.1.64. - [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release) - [Commits](https://github.com/miekg/dns/compare/v1.1.63...v1.1.64) --- updated-dependencies: - dependency-name: github.com/miekg/dns dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/muesli/termenv from 0.15.2 to 0.16.0 (#4272) Bumps [github.com/muesli/termenv](https://github.com/muesli/termenv) from 0.15.2 to 0.16.0. - [Release notes](https://github.com/muesli/termenv/releases) - [Commits](https://github.com/muesli/termenv/compare/v0.15.2...v0.16.0) --- updated-dependencies: - dependency-name: github.com/muesli/termenv dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group with 2 updates (#4269) Bumps the aws-sdk group with 2 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2). Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.10 to 1.29.11 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.10...config/v1.29.11) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.63 to 1.17.64 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.63...credentials/v1.17.64) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Check for cancelled context when updating machines (#4280) * check for cancelled context and sort machines by id * remove log and select statement * Bump github.com/spf13/viper from 1.20.0 to 1.20.1 (#4275) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.20.0 to 1.20.1. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.20.0...v1.20.1) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * support `fly wireguard create --network [custom-id]` (#3822) add network flag to wireguard create * handleFlexFailoverFail: fix nil pointer reference (#4285) Don't attempt to release lease if not found. * Bump golang.org/x/net from 0.37.0 to 0.38.0 in the golangx group (#4284) Bumps the golangx group with 1 update: [golang.org/x/net](https://github.com/golang/net). Updates `golang.org/x/net` from 0.37.0 to 0.38.0 - [Commits](https://github.com/golang/net/compare/v0.37.0...v0.38.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group with 3 updates (#4286) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
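The cancelled-context fix noted above (#4280) amounts to checking ctx.Err() between per-machine updates so a cancelled deploy stops issuing API calls, with machines processed in a stable sorted order. A minimal Go sketch of that shape; updateMachine and the id slice are illustrative stand-ins, not the actual flyctl code:

    package main

    import (
    	"context"
    	"fmt"
    	"sort"
    	"time"
    )

    // updateMachine stands in for the real per-machine update call.
    func updateMachine(ctx context.Context, id string) error {
    	select {
    	case <-ctx.Done():
    		return ctx.Err()
    	case <-time.After(10 * time.Millisecond):
    		fmt.Println("updated", id)
    		return nil
    	}
    }

    // updateAll bails out as soon as the context is cancelled instead of
    // continuing to issue update calls for the remaining machines.
    func updateAll(ctx context.Context, ids []string) error {
    	sort.Strings(ids) // stable order: sorted by machine id
    	for _, id := range ids {
    		if err := ctx.Err(); err != nil {
    			return err
    		}
    		if err := updateMachine(ctx, id); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 25*time.Millisecond)
    	defer cancel()
    	fmt.Println(updateAll(ctx, []string{"m3", "m1", "m2"}))
    }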
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.11 to 1.29.12 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.11...config/v1.29.12) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.64 to 1.17.65 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.64...credentials/v1.17.65) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.78.2 to 1.79.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.78.2...service/s3/v1.79.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.12 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.65 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.79.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * daily gpu preflight tests are causing unnecesary load (#4290) * create `fly mpg attach` (#4282) * create `fly mpg attach` * Remove dead func * Better wait and error feedback * Fix comments and spaces for parsing env secrets (#4294) * Fix #3002 parsing spaces around equals in env var Fixes #3002 * Fix #4291 supporting commments at the end of env var lines Fixes #4291 * Bump the aws-sdk group with 3 updates (#4287) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
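The env-secrets parsing fix above (#4294) covers two cases: whitespace around '=' (issue #3002) and a comment at the end of a value line (issue #4291). A rough Go sketch of that parsing idea, assuming a trailing comment starts at " #"; the exact rules in flyctl's parser may differ:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseEnvLine is an illustrative parser for lines like
    //   KEY = value        (spaces around '=')
    //   KEY=value # note   (trailing comment)
    func parseEnvLine(line string) (key, value string, ok bool) {
    	line = strings.TrimSpace(line)
    	if line == "" || strings.HasPrefix(line, "#") {
    		return "", "", false
    	}
    	k, v, found := strings.Cut(line, "=")
    	if !found {
    		return "", "", false
    	}
    	// Drop a trailing comment only when '#' follows whitespace,
    	// so values that merely contain '#' are left alone.
    	if i := strings.Index(v, " #"); i >= 0 {
    		v = v[:i]
    	}
    	return strings.TrimSpace(k), strings.TrimSpace(v), true
    }

    func main() {
    	for _, l := range []string{"FOO = bar", "BAZ=qux # comment", "# only a comment"} {
    		k, v, ok := parseEnvLine(l)
    		fmt.Printf("%q -> key=%q value=%q ok=%v\n", l, k, v, ok)
    	}
    }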
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.12 to 1.29.13 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.12...config/v1.29.13) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.65 to 1.17.66 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.65...credentials/v1.17.66) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.79.0 to 1.79.1 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.79.0...service/s3/v1.79.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.13 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.66 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.79.1 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * limit shell app names to 63 chars (#4296) * Add .venv to Python .dockerignore (#4299) - Fixes #3853 * Bump the golangx group with 6 updates (#4303) Bumps the golangx group with 6 updates: | Package | From | To | | --- | --- | --- | | [golang.org/x/crypto](https://github.com/golang/crypto) | `0.36.0` | `0.37.0` | | [golang.org/x/net](https://github.com/golang/net) | `0.38.0` | `0.39.0` | | [golang.org/x/sync](https://github.com/golang/sync) | `0.12.0` | `0.13.0` | | [golang.org/x/sys](https://github.com/golang/sys) | `0.31.0` | `0.32.0` | | [golang.org/x/term](https://github.com/golang/term) | `0.30.0` | `0.31.0` | | [golang.org/x/text](https://github.com/golang/text) | `0.23.0` | `0.24.0` | Updates `golang.org/x/crypto` from 0.36.0 to 0.37.0 - [Commits](https://github.com/golang/crypto/compare/v0.36.0...v0.37.0) Updates `golang.org/x/net` from 0.38.0 to 0.39.0 - [Commits](https://github.com/golang/net/compare/v0.38.0...v0.39.0) Updates `golang.org/x/sync` from 0.12.0 to 0.13.0 - [Commits](https://github.com/golang/sync/compare/v0.12.0...v0.13.0) Updates `golang.org/x/sys` from 0.31.0 to 0.32.0 - [Commits](https://github.com/golang/sys/compare/v0.31.0...v0.32.0) Updates `golang.org/x/term` from 0.30.0 to 0.31.0 - [Commits](https://github.com/golang/term/compare/v0.30.0...v0.31.0) Updates `golang.org/x/text` from 0.23.0 to 0.24.0 - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/net dependency-version: 0.39.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sync 
dependency-version: 0.13.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sys dependency-version: 0.32.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/term dependency-version: 0.31.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/text dependency-version: 0.24.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Support for managing custom domains for Tigris storage buckets (#4302) * feat: honor BUILDKIT_PROGRESS as like Docker (#4249) BUILDKIT_PROGRESS=plain is especially useful for debugging. Before this change, it only works when DOCKER_BUILDKIT=false. * Bump github.com/nats-io/nats.go from 1.39.1 to 1.41.0 (#4288) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.39.1 to 1.41.0. - [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.39.1...v1.41.0) --- updated-dependencies: - dependency-name: github.com/nats-io/nats.go dependency-version: 1.41.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/pkg/sftp from 1.13.7 to 1.13.9 (#4274) Bumps [github.com/pkg/sftp](https://github.com/pkg/sftp) from 1.13.7 to 1.13.9. - [Release notes](https://github.com/pkg/sftp/releases) - [Commits](https://github.com/pkg/sftp/compare/v1.13.7...v1.13.9) --- updated-dependencies: - dependency-name: github.com/pkg/sftp dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/nats-io/nats.go from 1.41.0 to 1.41.1 (#4305) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.41.0 to 1.41.1. - [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.41.0...v1.41.1) --- updated-dependencies: - dependency-name: github.com/nats-io/nats.go dependency-version: 1.41.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump google.golang.org/grpc from 1.71.0 to 1.71.1 (#4304) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.71.0 to 1.71.1. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.71.0...v1.71.1) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.71.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/superfly/fly-go from 0.1.42 to 0.1.43 (#4306) Bumps [github.com/superfly/fly-go](https://github.com/superfly/fly-go) from 0.1.42 to 0.1.43. 
- [Release notes](https://github.com/superfly/fly-go/releases) - [Changelog](https://github.com/superfly/fly-go/blob/main/resource_releases.go) - [Commits](https://github.com/superfly/fly-go/compare/v0.1.42...v0.1.43) --- updated-dependencies: - dependency-name: github.com/superfly/fly-go dependency-version: 0.1.43 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * initial mcp implementation (#4307) * initial mcp implementation * mcp proxy and wrap * add basic auth * fix typo (#4311) * fill in more details in the mcp docs (#4312) * fix mpg connect (#4314) * fix: clear machine lease nonce in releaseLeases (#4313) In the edge-case where a fly.Machine is updated twice in a single deploy (which currently happens with a single machine in 'canary' deployments), the lease won't get acquired during the second deploy if a nonce still exists on the Machine struct. Fixes "lease not found" error on cleanup for canary deploys. * Bump github.com/prometheus/client_golang from 1.20.5 to 1.22.0 (#4310) Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.20.5 to 1.22.0. - [Release notes](https://github.com/prometheus/client_golang/releases) - [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md) - [Commits](https://github.com/prometheus/client_golang/compare/v1.20.5...v1.22.0) --- updated-dependencies: - dependency-name: github.com/prometheus/client_golang dependency-version: 1.22.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/getsentry/sentry-go from 0.31.1 to 0.32.0 (#4309) Bumps [github.com/getsentry/sentry-go](https://github.com/getsentry/sentry-go) from 0.31.1 to 0.32.0. - [Release notes](https://github.com/getsentry/sentry-go/releases) - [Changelog](https://github.com/getsentry/sentry-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-go/compare/v0.31.1...v0.32.0) --- updated-dependencies: - dependency-name: github.com/getsentry/sentry-go dependency-version: 0.32.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group with 3 updates (#4308) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
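To make the lease-nonce fix above (#4313) concrete: after a machine's lease is released, the cached nonce has to be cleared so a second update of the same machine within one deploy (the canary case) acquires a fresh lease instead of reusing a stale one. A sketch with simplified stand-in types; Machine and releaseLease here are illustrative, not fly-go's API:

    package main

    import "fmt"

    // Machine is a simplified stand-in for fly.Machine; only the field
    // relevant to the fix is shown.
    type Machine struct {
    	ID         string
    	LeaseNonce string
    }

    // releaseLease is a placeholder for the API call that releases a lease.
    func releaseLease(m *Machine) error {
    	fmt.Println("released lease for", m.ID)
    	return nil
    }

    // releaseLeases clears the cached nonce after releasing, so a later
    // update in the same deploy re-acquires rather than reusing it.
    func releaseLeases(machines []*Machine) {
    	for _, m := range machines {
    		if m.LeaseNonce == "" {
    			continue // nothing held; avoids "lease not found" on cleanup
    		}
    		if err := releaseLease(m); err != nil {
    			fmt.Println("warn:", err)
    		}
    		m.LeaseNonce = "" // forget the nonce so re-acquisition works
    	}
    }

    func main() {
    	m := &Machine{ID: "m1", LeaseNonce: "abc"}
    	releaseLeases([]*Machine{m})
    	fmt.Printf("nonce after release: %q\n", m.LeaseNonce)
    }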
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.13 to 1.29.14 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.13...config/v1.29.14) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.66 to 1.17.67 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.66...credentials/v1.17.67) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.79.1 to 1.79.2 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.79.1...service/s3/v1.79.2) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.14 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.67 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.79.2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/pelletier/go-toml/v2 from 2.2.3 to 2.2.4 (#4318) Bumps [github.com/pelletier/go-toml/v2](https://github.com/pelletier/go-toml) from 2.2.3 to 2.2.4. - [Release notes](https://github.com/pelletier/go-toml/releases) - [Changelog](https://github.com/pelletier/go-toml/blob/v2/.goreleaser.yaml) - [Commits](https://github.com/pelletier/go-toml/compare/v2.2.3...v2.2.4) --- updated-dependencies: - dependency-name: github.com/pelletier/go-toml/v2 dependency-version: 2.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/coder/websocket from 1.8.12 to 1.8.13 (#4319) Bumps [github.com/coder/websocket](https://github.com/coder/websocket) from 1.8.12 to 1.8.13. - [Release notes](https://github.com/coder/websocket/releases) - [Commits](https://github.com/coder/websocket/compare/v1.8.12...v1.8.13) --- updated-dependencies: - dependency-name: github.com/coder/websocket dependency-version: 1.8.13 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix: cancel acquiring a Depot machine if it takes > 5 minutes (#4315) Before this change, depotmachine.Acquire() was blocking indefinitely because we are not passing timeoutCtx. Instead of doing so, this change simplifies the flow by moving the timeout one above (at depotBuild). * Bump github.com/prometheus/client_model from 0.6.1 to 0.6.2 (#4322) Bumps [github.com/prometheus/client_model](https://github.com/prometheus/client_model) from 0.6.1 to 0.6.2. 
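The Depot-acquire fix above (#4315) moves the timeout up a level so the whole acquire step is bounded by the caller's context rather than blocking indefinitely. A small Go sketch of that shape; acquireBuilder stands in for the real acquire call, and the demo timeout replaces the 5-minute bound:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    // acquireBuilder stands in for a call that may block until the
    // context is cancelled (the real code acquires a Depot build machine).
    func acquireBuilder(ctx context.Context) error {
    	<-ctx.Done()
    	return ctx.Err()
    }

    // depotBuild bounds the whole acquire+build phase with one timeout,
    // rather than relying on the inner call to time out on its own.
    func depotBuild(ctx context.Context, timeout time.Duration) error {
    	ctx, cancel := context.WithTimeout(ctx, timeout)
    	defer cancel()
    	if err := acquireBuilder(ctx); err != nil {
    		if errors.Is(err, context.DeadlineExceeded) {
    			return fmt.Errorf("gave up acquiring a build machine after %s: %w", timeout, err)
    		}
    		return err
    	}
    	return nil
    }

    func main() {
    	// 5 minutes in the real change; a short timeout keeps the demo quick.
    	fmt.Println(depotBuild(context.Background(), 100*time.Millisecond))
    }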
- [Release notes](https://github.com/prometheus/client_model/releases) - [Commits](https://github.com/prometheus/client_model/compare/v0.6.1...v0.6.2) --- updated-dependencies: - dependency-name: github.com/prometheus/client_model dependency-version: 0.6.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/nats-io/nats.go from 1.41.1 to 1.41.2 (#4324) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.41.1 to 1.41.2. - [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.41.1...v1.41.2) --- updated-dependencies: - dependency-name: github.com/nats-io/nats.go dependency-version: 1.41.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add --cluster flag to `fly mpg connect` and `fly mpg proxy`. (#4326) * Validate, default, and allow container to be selected (#4325) * Validate, default, and allow container to be selected if --container is specified validate that the selected machine is running containers, and has the container you specified if --container is not specified and the selected machine is running containers default to the first container (this matches previous behavior on selecting machines) if --selected is specified and there is more than one, prompt for selection Notes: --select now applies to *both* machines and containers --select now is ignored if there is only one possible selection fly ssh sftp now will respect the container selected * let user know when `fly mpg connect` fails due to cluster not being in ready state (#4327) * Launch MPG instead of Fly Postgres * Bump github.com/mark3labs/mcp-go from 0.19.0 to 0.23.1 (#4333) Bumps [github.com/mark3labs/mcp-go](https://github.com/mark3labs/mcp-go) from 0.19.0 to 0.23.1. - [Release notes](https://github.com/mark3labs/mcp-go/releases) - [Commits](https://github.com/mark3labs/mcp-go/compare/v0.19.0...v0.23.1) --- updated-dependencies: - dependency-name: github.com/mark3labs/mcp-go dependency-version: 0.23.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add `status` and `list` MPG subcommands (#4332) * Revert "Launch MPG instead of Fly Postgres" This reverts commit b08302166dff7281a37e8240e54d98f41843a6d5. 
* update `fly mpg attach` help text to more explicitly explain what it … (#4335) update `fly mpg attach` help text to more explicitly explain what it does * Launch MPG instead of Fly Postgres (#4334) * Launch MPG instead of Fly Postgres * fetch raw slug for org from ui-ex if the slug that was passed in is `personal`, as the call to create an MPG cluster cannot understand `personal` slugs * include RequireUiex in the `launch` command * if the slug is `personal`, fetch the full org from ui-ex and use the raw slug, as the create cluster endpoint doesn't understand `personal` slugs * Require a name for mpg create, and fix MPG in launch * Add message about PG provisioning * allow moving on with deployment if MPG provision takes too long, and display better values for the MPG plan * Enable MPG provisioning from UI form --------- Co-authored-by: Jacob Fenton * Show deprecation notice for 'fly pg' commands (#4341) * Improvments for `mpg create` (#4342) Improvments for mpg create * Remove ams target region, reserved for Fly.io testing for now (#4343) * SImplify MPG attach to use default MPG user (#4344) * rewrite `fly mpg attach` to merely grab the pgbouncer connection URI and write it to a secret in the app; we are holding off the whole "create a new user and database" thing for now, see: https://flyio.discourse.team/t/fly-mpg-attach-is-getting-a-makeover/8438" * Ensure launch also uses standard MPG user attach --------- Co-authored-by: Jacob Fenton * get fly machine run and fly launch working with containers (#4331) * Update fly-go to split MachineCheck/MachineServiceCheck (#4349) * Update fly-go to split MachineCheck/MachineServiceCheck * appconfig: Fix broken definition test A fly-go change added omitempty to these fields * Drop Ubuntu 20.04 from CI install tests (#4350) No longer supported (and will be canceled/failed) by GitHub: https://github.com/actions/runner-images/issues/11101 * Allow `fly mcp proxy` access to your app's network via wireguard. (#4346) Pass an `--app` parameter to your `fly mcp proxy`; the app also will be inferred if the URL ends with .internal or .flycast * Bump google.golang.org/grpc from 1.71.1 to 1.72.0 (#4320) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.71.1 to 1.72.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.71.1...v1.72.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.72.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/vektah/gqlparser/v2 from 2.5.23 to 2.5.26 (#4336) Bumps [github.com/vektah/gqlparser/v2](https://github.com/vektah/gqlparser) from 2.5.23 to 2.5.26. - [Release notes](https://github.com/vektah/gqlparser/releases) - [Commits](https://github.com/vektah/gqlparser/compare/v2.5.23...v2.5.26) --- updated-dependencies: - dependency-name: github.com/vektah/gqlparser/v2 dependency-version: 2.5.26 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Don't pick 'first' machine if specified machine is not found (#4300) - Fixes #3807 * Refactor mcp/server (#4355) * Refactor mcp/server * fly mcp server for volume * add platform commands * add fly mcp server -i option * add orgs and apps... 
(#4359) * add orgs * add apps and log region * fly machine * build and push mcp images (#4347) * MCP cleanup (#4361) proxy: add --inspector server: match flyctl version number wrap: make --mcp optional * minor improvements to proxy commands (#4352) * if config validate --app is passed and there is no local app, fetch remote (#4351) See: https://community.fly.io/t/receiving-oops-something-went-wrong-could-you-try-that-again-when-running-fly-config-validate-a-app-name/24740 * Mcp private proxies (#4362) * fly mcp wrap --private * Let user and password also be set as environment variables * flyio/mcp image += uv (#4365) * fly app create --save (#4364) Useful if the next commands are any of: * fly ips allocate * fly machine create * fly volumes create * fly apps open * ... * now that there is an inspect flag, it makes sense to default app (#4366) * Make machine run/create --command work with non-interactive use too (#4367) * support machine run --command in non-interactive mode too * move container to shared between machine run and machine create * MCP: Respond to HTML requests with a simple message (#4369) * Support HTTP bearer authentication for mcp proxy and wrap (#4368) * Revert "Make machine run/create --command work with non-interactive use too (#4367)" (#4370) This reverts commit e56bfb178b6a03e9afdb5bd2886829ce523976c8. * Add capacity info to `fly platform regions` (#4363) Adds a 'capacity' field to `fly platform regions` output, along with with a colorized column in the printed output. * Restore code for handling launch of unmanaged PG (#4371) * restore code for handling launch of unmanaged PG * don't fetch LD client from ctx (there doesn't seem to be one there), build a new service client instead * Remove kafka extension (#4373) * Mcp config (#4372) * Add MCP client config add/remove proxy commands * add fly mcp launch * mcp proxy: Don't require app name, instead load it if it is present (#4376) Don't require app name, instead load it if it is present * add inspector and various cleanup (#4377) * add inspector and various cleanup * fix launch long description * add auto-stop flag to fly mcp launch (#4379) * better error handling for create MPG 403 (#4375) * Mcp destroy (#4380) * app destroy * forgot a file * make fly mcp proxy client mcp aware * go mod tidy * latest inspector * split out inspect command * fly mcp launch += secrets * fly mcp launch += secrets, files, vm-sizes, ... * support volumes on fly mcp launch (and fly launch) (#4381) Also: * Support launching images directly with command overrides. 
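The SSE fixes above (#4395) boil down to writing the event-stream headers and flushing them before any messages are posted, so the client knows the stream is open. A generic net/http sketch of that pattern, not the MCP proxy code itself:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // sseHandler flushes the SSE headers immediately, then flushes each
    // event as it is written, so nothing sits in the response buffer.
    func sseHandler(w http.ResponseWriter, r *http.Request) {
    	flusher, ok := w.(http.Flusher)
    	if !ok {
    		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
    		return
    	}
    	w.Header().Set("Content-Type", "text/event-stream")
    	w.Header().Set("Cache-Control", "no-cache")
    	w.WriteHeader(http.StatusOK)
    	flusher.Flush() // headers reach the client before the first event

    	for i := 0; i < 3; i++ {
    		fmt.Fprintf(w, "data: tick %d\n\n", i)
    		flusher.Flush()
    		time.Sleep(200 * time.Millisecond)
    	}
    }

    func main() {
    	http.HandleFunc("/events", sseHandler)
    	_ = http.ListenAndServe("127.0.0.1:8080", nil)
    }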
* Change default of fly mcp launch to auto-stop=suspend * Mcp server config (#4384) * add --image support to fly mcp launch * fly mcp server --config * add mcp volume * fix lint errors * add --image support to fly mcp launch (#4383) * add --image support to fly mcp launch * mod tidy * Volume syntax errors (#4385) * fix syntax errors * hide mcp volume * Set --internal-port in launch plan before launcing web UI (#4390) * add --no-github-workflow flag to fly launch (#4387) * fly mcp list (#4392) * add mcp logs & refactor a bit (#4396) * support fly launch --secret (#4397) * stop setting up github workflows on template repositories (#4398) * fix: various MCP proxy issues (#4395) * fix: make sure to flush SSE headers to client Co-authored-by: Addison LeClair * fix: make sure SSE is ready before posting messages Co-authored-by: Addison LeClair --------- Co-authored-by: Addison LeClair * MCP wrap/proxy refactor and improvements (#4399) Proxy: - support --instance to specify target machine - if url resolves, don't create a wireguard proxy - move passthru support to a separate file (prep for supporting replay) Wrap: - produce an error when there is an attempt to connect a second get - ensure last byte is a newline on POST * use new secret keys api from fly-go for managing secret keys in `flyctl secrets keys`. (#4388) * use new secret keys api from fly-go for managing secret keys in `flyctl secrets keys`. * add fly machine place command (#4400) * machine place command, simple wrapper around platform/placements API. * update to fly-go v0.1.47 * use fsnotify to wait for agent restart (#4123) * use fsnotify to wait for agent restart Restarts the agent as soon as its socket is removed instead of a full 1 second. Fallback to a timeout on any error with the fsnotify watcher. * Check for host and status page issues async (#4125) * Check for host and status page issues async Fetch issues at the start of command, and display at the end. If the command completes before the check has completed, don't block waiting for the response. This optimizes for fast CLI command completion, optionally displaying issues when possible. * Increase scanner buffer size to support MCP responses up to 10MB (#4402) * Increase scanner buffer size to support MCP responses up to 10MB * Add basic access logging * token refresh improvements (#4281) * update go mod * refresh auth tokens in parallel * add comments * mcp wrap: only log requests if LOG_LEVEL=="debug" (#4405) * Update `PlatformMap` so Sentry extensions for Shopify apps work correctly (#4403) * fix: update Shopify platform to point to js * fix: change mapping to javascript-remix --------- Co-authored-by: Injoong Yoon <27440940+AttilaTheHen@users.noreply.github.com> * launch propose to stderr (#4408) also backport some stuff from the deployer branch * add sse and streaming support to fly mcp proxy (#4407) * Fix `fly launch --yes` so that it overwrites the deploy file without a prompt (#4406) fix: allow --yes flag to overwrite the deploy yml Co-authored-by: Injoong Yoon <27440940+AttilaTheHen@users.noreply.github.com> * add setup instructions to fly mcp launch (#4410) * fix fly mcp launch --setup (#4411) entrypoint already has fly mcp wrap image already has flyctl * Bump github.com/nats-io/nats.go from 1.41.2 to 1.43.0 (#4412) Bumps [github.com/nats-io/nats.go](https://github.com/nats-io/nats.go) from 1.41.2 to 1.43.0. 
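The scanner change above (#4402) raises bufio.Scanner's maximum token size: the default 64KiB limit would reject a large single-line MCP response with bufio.ErrTooLong. A self-contained Go sketch of the same adjustment:

    package main

    import (
    	"bufio"
    	"fmt"
    	"strings"
    )

    func main() {
    	// A 1MiB single-line payload; the default scanner limit (64KiB)
    	// would fail here, so the maximum token size is raised to 10MiB.
    	input := strings.NewReader(strings.Repeat("x", 1<<20) + "\n")
    	scanner := bufio.NewScanner(input)
    	scanner.Buffer(make([]byte, 64*1024), 10*1024*1024)
    	for scanner.Scan() {
    		fmt.Println("line length:", len(scanner.Text()))
    	}
    	if err := scanner.Err(); err != nil {
    		fmt.Println("scan error:", err)
    	}
    }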
- [Release notes](https://github.com/nats-io/nats.go/releases) - [Commits](https://github.com/nats-io/nats.go/compare/v1.41.2...v1.43.0) --- updated-dependencies: - dependency-name: github.com/nats-io/nats.go dependency-version: 1.43.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Shift mpg proxy to only listen on localhost (#4413) * Shift mpg proxy to only listen on localhost * add a flag for bind address --------- Co-authored-by: Sam Ruby * add selection to various mcp commands (#4415) * add selection to various mcp commands * fix mcp destroy config * better mcp error messages when run non-interactively (#4420) * skip pg import tests (#4419) * skip pg import tests * increase timeout * Really skip postgres import tests (#4423) * support mcp server running with SSE and HTTP Streaming transports (#4424) Authentication tokens come from (in priority order): * bearer-token on the request * --access-token on the fly mcp server command * FLY_ACCESS_TOKEN environment variable Default bind address is 127.0.0.1; to override specify --bind-addr * Support launching go MCP servers (#4425) Suoport launching go MCP servers Example: fly mcp launch "go run github.com/mark3labs/mcp-filesystem-server@latest /" * add mcp server secrets commands (#4426) * add mcp server secrets commands * stage unsets too * add cert commands to mcp server (#4429) * Bump the tracing group with 6 updates (#4427) Bumps the tracing group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.60.0` | `0.61.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.35.0` | `1.36.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.35.0` | `1.36.0` | | [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) | `1.35.0` | `1.36.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.35.0` | `1.36.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.35.0` | `1.36.0` | Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.60.0 to 0.61.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.60.0...zpages/v0.61.0) Updates `go.opentelemetry.io/otel` from 1.35.0 to 1.36.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...v1.36.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.35.0 to 1.36.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...v1.36.0) Updates `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` from 1.35.0 to 1.36.0 - [Release 
notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...v1.36.0) Updates `go.opentelemetry.io/otel/sdk` from 1.35.0 to 1.36.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...v1.36.0) Updates `go.opentelemetry.io/otel/trace` from 1.35.0 to 1.36.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...v1.36.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-version: 0.61.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel dependency-version: 1.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-version: 1.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/stdout/stdouttrace dependency-version: 1.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/sdk dependency-version: 1.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/trace dependency-version: 1.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/containerd/containerd/v2 from 2.0.4 to 2.0.5 (#4391) Bumps [github.com/containerd/containerd/v2](https://github.com/containerd/containerd) from 2.0.4 to 2.0.5. - [Release notes](https://github.com/containerd/containerd/releases) - [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md) - [Commits](https://github.com/containerd/containerd/compare/v2.0.4...v2.0.5) --- updated-dependencies: - dependency-name: github.com/containerd/containerd/v2 dependency-version: 2.0.5 dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Mcp server bug fixes (#4431) * bug fixes for problems found during exploration * redact secrets * mcp server ips support (#4432) * mcp server ips support * cleanup lint * Bump github.com/cloudflare/circl from 1.3.7 to 1.6.1 (#4433) Bumps [github.com/cloudflare/circl](https://github.com/cloudflare/circl) from 1.3.7 to 1.6.1. - [Release notes](https://github.com/cloudflare/circl/releases) - [Commits](https://github.com/cloudflare/circl/compare/v1.3.7...v1.6.1) --- updated-dependencies: - dependency-name: github.com/cloudflare/circl dependency-version: 1.6.1 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group across 1 directory with 3 updates (#4428) Bumps the aws-sdk group with 2 updates in the / directory: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.14 to 1.29.15 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.14...config/v1.29.15) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.67 to 1.17.68 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.67...credentials/v1.17.68) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.79.2 to 1.80.1 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.79.2...service/s3/v1.80.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.15 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.68 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.80.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the golangx group with 6 updates (#4434) Bumps the golangx group with 6 updates: | Package | From | To | | --- | --- | --- | | [golang.org/x/crypto](https://github.com/golang/crypto) | `0.38.0` | `0.39.0` | | [golang.org/x/mod](https://github.com/golang/mod) | `0.24.0` | `0.25.0` | | [golang.org/x/net](https://github.com/golang/net) | `0.40.0` | `0.41.0` | | [golang.org/x/sync](https://github.com/golang/sync) | `0.14.0` | `0.15.0` | | [golang.org/x/text](https://github.com/golang/text) | `0.25.0` | `0.26.0` | | [golang.org/x/time](https://github.com/golang/time) | `0.11.0` | `0.12.0` | Updates `golang.org/x/crypto` from 0.38.0 to 0.39.0 - [Commits](https://github.com/golang/crypto/compare/v0.38.0...v0.39.0) Updates `golang.org/x/mod` from 0.24.0 to 0.25.0 - [Commits](https://github.com/golang/mod/compare/v0.24.0...v0.25.0) Updates `golang.org/x/net` from 0.40.0 to 0.41.0 - [Commits](https://github.com/golang/net/compare/v0.40.0...v0.41.0) Updates `golang.org/x/sync` from 0.14.0 to 0.15.0 - [Commits](https://github.com/golang/sync/compare/v0.14.0...v0.15.0) Updates `golang.org/x/text` from 0.25.0 to 0.26.0 - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.25.0...v0.26.0) Updates `golang.org/x/time` from 0.11.0 to 0.12.0 - [Commits](https://github.com/golang/time/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.39.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/mod dependency-version: 0.25.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/net dependency-version: 0.41.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sync dependency-version: 0.15.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/text dependency-version: 0.26.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/time dependency-version: 0.12.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump google.golang.org/grpc from 1.72.1 to 1.73.0 (#4436) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.72.1 to 1.73.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.72.1...v1.73.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-version: 1.73.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Jphenow/mpg improvements (#4444) * guard against changes to secrets that already exist * Get regions from the API * destroy command, reduce org flags where we actively don't need them, add them for protections to others * pulls some pieces apart so we can inject dependencies and write a couple basic tests * fix proxy/connect by resolving whether personal org * attach checks * Remove dead params, set disk properly on volume flag * Fixes region selection based on organization for Managed Postgres (#4445) * Enable pgvector from flyctl mpg create (#4447) * Enable pgvector from flyctl mpg create * update retry logic on fly launch with mpg so it's more likely you successfully get a database before launch proceeds * fix mpg test * fix connect selection without passed cluster flag (#4450) * fix: remove feature flag check from launch --db flag (#4448) Flags are defined in `command.New()` invoked on every single command run, adding latency to every flyctl command. The flag doesn't need to be conditional anyway, so remove the feature flag check. * Fix container file updates during deploy (#4452) - Re-parse machine config in launchInputForUpdate to get fresh container file content - This ensures files like fly-entrypoint.sh are updated when changed locally - Matches the behavior of fly launch where container files are properly copied * Fix hanging on launch/deploy by adding timeout to tracing shutdown (#4449) * Fix hanging on launch command errors by adding tracing shutdown timeout When the launch command encounters an error (like docker-compose validation failures), the tracing provider shutdown (tp.Shutdown) was hanging indefinitely while trying to flush telemetry data to the remote collector. This caused the error message to be delayed by up to a minute, creating a poor user experience. The fix adds a 2-second timeout to the tracing provider shutdown, ensuring that even if the telemetry collector is unreachable, the command fails fast and shows the error immediately. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * address review comments --------- Co-authored-by: Claude * Adding extras support to python dependencies parsing (#4458) * Fix panic in case Cargo.toml has no dependencies (#4455) * Add Docker Compose support to fly deploy (#4454) * Add Docker Compose support to fly deploy - Add compose field to Build struct in appconfig - Create containerconfig package to centralize container file parsing - Extract machine config parsing logic from internal/config - Implement basic Docker Compose YAML parsing with support for: - Single-service compose files - Image, environment, command, entrypoint - Basic port mapping and restart policies - Update deploy and launch commands to use common parsing logic - Add tests and example files This enables users to specify compose = "compose.yml" in their fly.toml [build] section for deployment. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Support multiple services in Docker Compose files - Update compose parser to support multi-service compose files - Map compose services to containers in MachineConfig - Use 'app' service as main container, or first service if no 'app' exists - Add comprehensive tests for multi-service support - Update example compose file to demonstrate multi-service usage Single-service compose files continue to work as before. 
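The tracing-shutdown fix above (#4449) bounds the tracer provider shutdown with a short timeout so an unreachable collector cannot delay the command's error output. A sketch using the OpenTelemetry SDK directly; the actual wiring in flyctl differs:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    // shutdownTracing flushes and shuts down the tracer provider, but
    // gives up after two seconds instead of blocking indefinitely.
    func shutdownTracing(tp *sdktrace.TracerProvider) {
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    	defer cancel()
    	if err := tp.Shutdown(ctx); err != nil {
    		fmt.Println("tracing shutdown:", err)
    	}
    }

    func main() {
    	tp := sdktrace.NewTracerProvider()
    	defer shutdownTracing(tp)
    }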
Multi-service files now create containers for each service. * Simplify Docker Compose behavior: always use containers - Remove conditional logic for single vs multi-service compose files - Always map compose services to containers regardless of count - Makes behavior consistent and predictable - Single-service compose files now also create containers - Update tests and examples to reflect simplified behavior This provides a cleaner, more consistent approach where Docker Compose always results in container-based deployments. * Add Docker Compose service networking support - Extract actual entrypoints from Docker images using go-containerregistry - Create hosts update script to map service names to localhost - Wrap container entrypoints to update /etc/hosts before execution - Enable Docker Compose service-to-service networking compatibility - Add comprehensive tests for networking functionality Services can now reference each other by name (e.g., 'db', 'cache') just like in Docker Compose, but all services run on localhost in the Fly.io container environment. * Add volume mounts and health checks support to Docker Compose - Support volume mounts with read-only flag (e.g., ./config:/etc/config:ro) - Convert volume paths to absolute paths relative to compose file - Encode volume file contents as base64 and add to container files - Support health checks with exec commands - Parse health check intervals, timeouts, and retries - Add comprehensive tests for volume and health check features - Update example to demonstrate all features This enables the rate-limiter-demo compose.yml and similar configurations to work with fly deploy. * Remove examples directory * Centralize machine config parsing to use single implementation - Remove duplicate ParseMachineConfig and readLocalFiles functions from containerconfig package - Update containerconfig to use config.ParseConfig as the centralized parsing function - Add mergeConfigs function to properly merge machine configs while preserving existing values - Fix machine config merging to maintain DNS nameservers and other fields during updates - Eliminate ~200 lines of duplicate parsing logic while maintaining full backward compatibility 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Simplify container config parsing by eliminating merging complexity - Update ParseComposeFile and ParseComposeFileWithPath to parse directly into existing mConfig - Update composeToMachineConfig to modify existing config instead of creating new one - Remove mergeIntoConfig function - no longer needed since both paths parse in-place - Update ParseContainerConfig to eliminate all merging logic - Update all tests to use new signature with in-place parsing - Both compose files and machine config JSON now use consistent in-place parsing pattern This eliminates ~75 lines of complex merging logic while maintaining full functionality. Both parsing paths now work identically: parse directly into the target mConfig. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Fix logic for container file re-parsing in deploy Only re-parse container config for compose files, not machine config. The re-parsing is specifically needed to refresh volume-mounted file content for compose files, but machine config files are already handled correctly by the initial parsing. This eliminates unnecessary work and fixes the logical flaw in the condition. 
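The volume-mount handling mentioned above (host paths resolved relative to the compose file, contents embedded as base64, the `:ro` suffix honored) can be pictured roughly as follows. The `containerFile` type and `bindMountToFile` helper are hypothetical names used for illustration only.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// containerFile is a stand-in for "a file baked into the container":
// a guest path plus base64-encoded content and a read-only flag.
type containerFile struct {
	GuestPath string
	RawB64    string
	ReadOnly  bool
}

// bindMountToFile turns a compose-style bind mount ("./conf:/etc/conf:ro")
// into an embedded file, resolving the host path relative to the compose dir.
func bindMountToFile(composeDir, mount string) (containerFile, error) {
	parts := strings.Split(mount, ":")
	if len(parts) < 2 {
		return containerFile{}, fmt.Errorf("unsupported volume spec %q", mount)
	}
	hostPath := parts[0]
	if !filepath.IsAbs(hostPath) {
		hostPath = filepath.Join(composeDir, hostPath)
	}
	data, err := os.ReadFile(hostPath)
	if err != nil {
		return containerFile{}, err
	}
	return containerFile{
		GuestPath: parts[1],
		RawB64:    base64.StdEncoding.EncodeToString(data),
		ReadOnly:  len(parts) > 2 && parts[2] == "ro",
	}, nil
}

func main() {
	f, err := bindMountToFile(".", "./nginx.conf:/etc/nginx/nginx.conf:ro")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("%s (read-only=%v, %d bytes encoded)\n", f.GuestPath, f.ReadOnly, len(f.RawB64))
}
```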
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Optimize container file re-parsing logic Instead of always re-parsing when MachineConfig or Compose is present, only re-parse when containers actually have files that might need refreshing. This preserves correctness (both machine config and compose can reference local files) while eliminating unnecessary work when no files are involved. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Fix formatting: add missing newlines and remove test files - Add missing newline at end of nginx.conf test file - Remove test-compose directory that was accidentally committed These changes match the pre-commit hook requirements. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Add Docker Compose support to fly deploy - Add 'compose' field to Build struct in fly.toml to specify compose file - Create containerconfig package to parse both machine config and compose files - Parse Docker Compose services into Fly containers - Support volume mounts and health checks from compose files - Preserve container images when specified (don't override with build image) - Remove entrypoint extraction and hosts update script - let Fly handle networking - Update tests to match new behavior Usage: Add to fly.toml: [build] compose = "compose.yml" Then run 'fly deploy' to deploy containers from the compose file. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Add support for build in compose.yml - Only one service can specify build section - Service with build gets image '.' as placeholder - Other services must specify image - UpdateContainerImage replaces '.' with actual built image - Add comprehensive tests for build functionality * Add Docker Compose depends_on support - Parse both short and long syntax depends_on from compose files - Map Docker Compose conditions to Fly ContainerDependency conditions - Populate ContainerConfig.DependsOn field with parsed dependencies - Add comprehensive tests for dependency functionality 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Fix container image selection for compose files - Only apply container selection logic for machine config JSON, not compose files - Prevent compose file images from being overridden by selection logic - Update comments to accurately describe container selection priority This fixes the issue where nginx container was incorrectly getting image '.' when it should keep its specified image from compose file. 
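A minimal sketch of the build placeholder rule summarized above: the single service that carries a `build:` section is recorded with the placeholder image ".", and once the image is actually built the placeholder is swapped for the real reference while explicitly named images are left untouched. The `updateContainerImage` name follows the commit description, but the types here are illustrative.

```go
package main

import "fmt"

type container struct {
	Name  string
	Image string
}

// updateContainerImage replaces the "." placeholder (the one service that
// had a build: section) with the image reference produced by the build.
// Services that named an explicit image are left as-is.
func updateContainerImage(containers []container, builtImage string) []container {
	for i := range containers {
		if containers[i].Image == "." {
			containers[i].Image = builtImage
		}
	}
	return containers
}

func main() {
	containers := []container{
		{Name: "app", Image: "."},            // had build: in compose.yml
		{Name: "nginx", Image: "nginx:1.27"}, // explicit image, kept as-is
	}
	containers = updateContainerImage(containers, "registry.fly.io/my-app:deployment-123")
	for _, c := range containers {
		fmt.Printf("%s -> %s\n", c.Name, c.Image)
	}
}
```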
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Refactor ParseContainerConfig and inline ParseComposeFile - Inlined ParseComposeFile function since it was just a wrapper - Updated tests to use ParseComposeFileWithPath directly - Moved selectedContainer declaration to top of ParseContainerConfig - Consolidated container validation into single loop - Removed unnecessary nil check for selectedContainer 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Remove file mode setting and healthcheck name from compose parsing - Removed hardcoded file mode (0444/0644) from volume file handling - Removed hardcoded "healthcheck" name, letting it use defaults - Updated tests to not expect specific mode values - Removed debug print statements from updateContainerImage 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --------- Co-authored-by: Claude * start with pooled builders (#4459) * start with managed-builders * Add DisableManagedBuilders to config.yml * fix and add tests * --pooled-builder flag * rename to --builder-pool * retrieve user and db from credentials (to support more flexible naming) (#4461) * Bump github.com/go-viper/mapstructure/v2 from 2.2.1 to 2.3.0 (#4446) Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.2.1 to 2.3.0. - [Release notes](https://github.com/go-viper/mapstructure/releases) - [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md) - [Commits](https://github.com/go-viper/mapstructure/compare/v2.2.1...v2.3.0) --- updated-dependencies: - dependency-name: github.com/go-viper/mapstructure/v2 dependency-version: 2.3.0 dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * removed stale docs about building via docker (#4462) actual docs are already in CONTRIBUTING.md * Convert Build.Compose from string to struct with auto-detection (#4463) refactor: Convert Build.Compose from string to struct with auto-detection - Changed Build.Compose from string to *BuildCompose struct - Added BuildCompose.File field to hold the compose file path - Added DetectComposeFile() method to auto-detect well-known compose filenames - Updated all references to use the new structure - Added support for auto-detection of compose.yaml, compose.yml, docker-compose.yaml, docker-compose.yml - Added comprehensive tests for the new functionality This change enables more flexible Docker Compose configuration and prepares for future enhancements to compose file handling. 🤖 Generated with Claude Code Co-authored-by: Claude * Fix nil pointer dereference in mcp add command (#4468) * Fix nil pointer dereference in mcp add command The mcp add command was crashing with a nil pointer dereference when calling machine.ListActive() because no flaps client was set up in the context. This adds the missing flaps client initialization before calling appconfig.FromRemoteApp(). 
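The mcp add fix above boils down to a common Go pattern: construct the dependency and stash it in the context before calling code that expects to find it there. The self-contained sketch below illustrates that pattern with stand-in types; flyctl's real helpers live in internal/flapsutil and differ in their exact names and signatures.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Stand-in for the flaps (Machines API) client that downstream code expects
// to find in the context. The type and helpers below are illustrative only.
type machinesClient struct{ appName string }

type ctxKey struct{}

func withClient(ctx context.Context, c *machinesClient) context.Context {
	return context.WithValue(ctx, ctxKey{}, c)
}

func clientFromContext(ctx context.Context) *machinesClient {
	c, _ := ctx.Value(ctxKey{}).(*machinesClient)
	return c
}

// fromRemoteApp mimics a helper that lists machines for an app and therefore
// needs the client to already be in the context; without it, the original
// bug surfaced as a nil pointer dereference.
func fromRemoteApp(ctx context.Context, appName string) error {
	client := clientFromContext(ctx)
	if client == nil {
		return errors.New("no machines client in context")
	}
	fmt.Printf("listing active machines for %s via client for %s\n", appName, client.appName)
	return nil
}

func main() {
	ctx := context.Background()

	// The fix: build the client for the target app and put it in the
	// context *before* calling code that reads it back out.
	ctx = withClient(ctx, &machinesClient{appName: "my-app"})

	if err := fromRemoteApp(ctx, "my-app"); err != nil {
		fmt.Println("error:", err)
	}
}
```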
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude * Fix formatting issues in mcp config 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --------- Co-authored-by: Claude * Fix name collision when calling DeleteAddOn (#4469) * feat: update DeleteAddOn mutation with provider * feat: add validation to check providers match * feat: add provider to all DeleteAddOn mutations --------- Co-authored-by: Injoong Yoon <27440940+AttilaTheHen@users.noreply.github.com> * MPG: use existing "default" user, with credentials retrieved from existing call (#4472) use existing "default" user, with credentials retrieved from existing call to getManagedClusterById * cleanup output for `flyctl mpg proxy` (#4473) cleanup output for flyctl mpg proxy * Jphenow/obvious MPG cost (#4476) * unnecessary slice append for a static command option * show plan info on create * Bump rack from 2.2.13 to 2.2.14 in /test/preflight/fixtures/example-buildpack (#4360) Bump rack in /test/preflight/fixtures/example-buildpack Bumps [rack](https://github.com/rack/rack) from 2.2.13 to 2.2.14. - [Release notes](https://github.com/rack/rack/releases) - [Changelog](https://github.com/rack/rack/blob/main/CHANGELOG.md) - [Commits](https://github.com/rack/rack/compare/v2.2.13...v2.2.14) --- updated-dependencies: - dependency-name: rack dependency-version: 2.2.14 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the aws-sdk group with 3 updates (#4437) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.16 to 1.29.17 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.16...config/v1.29.17) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.69 to 1.17.70 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.69...credentials/v1.17.70) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.80.2 to 1.80.3 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.80.2...service/s3/v1.80.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.17 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.70 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.80.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * ci: remove windows-2019 and include macos-15 (#4477) windows-2019 is deprecated. macos-15 is not yet tagged "macos-latest", but Apple's latest version. * Improve the output of the `fly certs` commands. (#4478) * Update gql schema * Rework `fly certs add` to provide all setup options. * Add `fly certs setup` command to show configuration steps again * Formatting * Lints * Formatting * Bump the tracing group with 6 updates (#4479) Bumps the tracing group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.61.0` | `0.62.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.36.0` | `1.37.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.36.0` | `1.37.0` | | [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) | `1.36.0` | `1.37.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.36.0` | `1.37.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.36.0` | `1.37.0` | Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.61.0 to 0.62.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.61.0...zpages/v0.62.0) Updates `go.opentelemetry.io/otel` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...v1.37.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...v1.37.0) Updates `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...v1.37.0) Updates `go.opentelemetry.io/otel/sdk` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...v1.37.0) Updates `go.opentelemetry.io/otel/trace` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...v1.37.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
dependency-version: 0.62.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/stdout/stdouttrace dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/sdk dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/trace dependency-version: 1.37.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the golangx group with 7 updates (#4483) Bumps the golangx group with 7 updates: | Package | From | To | | --- | --- | --- | | [golang.org/x/crypto](https://github.com/golang/crypto) | `0.39.0` | `0.40.0` | | [golang.org/x/mod](https://github.com/golang/mod) | `0.25.0` | `0.26.0` | | [golang.org/x/net](https://github.com/golang/net) | `0.41.0` | `0.42.0` | | [golang.org/x/sync](https://github.com/golang/sync) | `0.15.0` | `0.16.0` | | [golang.org/x/sys](https://github.com/golang/sys) | `0.33.0` | `0.34.0` | | [golang.org/x/term](https://github.com/golang/term) | `0.32.0` | `0.33.0` | | [golang.org/x/text](https://github.com/golang/text) | `0.26.0` | `0.27.0` | Updates `golang.org/x/crypto` from 0.39.0 to 0.40.0 - [Commits](https://github.com/golang/crypto/compare/v0.39.0...v0.40.0) Updates `golang.org/x/mod` from 0.25.0 to 0.26.0 - [Commits](https://github.com/golang/mod/compare/v0.25.0...v0.26.0) Updates `golang.org/x/net` from 0.41.0 to 0.42.0 - [Commits](https://github.com/golang/net/compare/v0.41.0...v0.42.0) Updates `golang.org/x/sync` from 0.15.0 to 0.16.0 - [Commits](https://github.com/golang/sync/compare/v0.15.0...v0.16.0) Updates `golang.org/x/sys` from 0.33.0 to 0.34.0 - [Commits](https://github.com/golang/sys/compare/v0.33.0...v0.34.0) Updates `golang.org/x/term` from 0.32.0 to 0.33.0 - [Commits](https://github.com/golang/term/compare/v0.32.0...v0.33.0) Updates `golang.org/x/text` from 0.26.0 to 0.27.0 - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.40.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/mod dependency-version: 0.26.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/net dependency-version: 0.42.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sync dependency-version: 0.16.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sys dependency-version: 0.34.0 dependency-type: direct:production 
update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/term dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/text dependency-version: 0.27.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * test: trace preflight (#4482) * test: trace preflight Preflight test's flyctl is IsDev == true, but the traces are better to go to the production endpoint, to connect them with server-side production traces. * feat: give all tests the common prefix The prefix was random and it was harder to filter tests from one specific run. * uses more up-to-date postgresql URL schema (#4481) * uses more up-to-date postgresql URL schema * Fly auth docker (#4484) * Update text styling and add note for token expiration * test commit * Fix trailing whitespace linting issue * Config: strict validation (#4487) * ignore CLAUDE.md * Add `--strict` flag to `flyctl config validate` which will check for unrecognised sections/fields in the config file * fixup tests for strict validation * call `patchRoot` before running strict validation so that old-format … (#4488) * call `patchRoot` before running strict validation so that old-format config files aren't automatically marked as invalid * Update deprecation notice for unmanaged Postgres (#4492) * Update fly scale vm flag (#4493) * Allow enabling Postgis via mpg create command (#4494) * allow enabling Postgis via mpg create command * Fix tests * Use a retrieved CNAME target rather than using the app preview domain (#4495) * Use a retrieved CNAME target rather than using the app preview domain * mod tidy * Fixup inmem & mock * fix(scale): Ignore volumes on troubled hosts (#4497) * Bump github.com/fsnotify/fsnotify from 1.8.0 to 1.9.0 (#4490) Bumps [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify) from 1.8.0 to 1.9.0. - [Release notes](https://github.com/fsnotify/fsnotify/releases) - [Changelog](https://github.com/fsnotify/fsnotify/blob/main/CHANGELOG.md) - [Commits](https://github.com/fsnotify/fsnotify/compare/v1.8.0...v1.9.0) --- updated-dependencies: - dependency-name: github.com/fsnotify/fsnotify dependency-version: 1.9.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Fix: Always print setup options for `certs setup` command (#4499) * MPG Extensions housekeeping (#4501) * Remove PGVector option from managed Postgres cluster creation * Rename PostGIS flag for cluster creation * fix acquireMachineLease timeout seconds calculation (#4508) * Bump the aws-sdk group across 1 directory with 3 updates (#4491) Bumps the aws-sdk group with 2 updates in the / directory: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.29.17 to 1.29.18 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.17...config/v1.29.18) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.17.70 to 1.17.71 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.70...credentials/v1.17.71) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.83.0 to 1.84.1 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.83.0...service/s3/v1.84.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.29.18 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.17.71 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.84.1 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/cenkalti/backoff/v5 from 5.0.2 to 5.0.3 (#4512) Bumps [github.com/cenkalti/backoff/v5](https://github.com/cenkalti/backoff) from 5.0.2 to 5.0.3. - [Changelog](https://github.com/cenkalti/backoff/blob/v5/CHANGELOG.md) - [Commits](https://github.com/cenkalti/backoff/compare/v5.0.2...v5.0.3) --- updated-dependencies: - dependency-name: github.com/cenkalti/backoff/v5 dependency-version: 5.0.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump github.com/launchdarkly/go-sdk-common/v3 from 3.2.0 to 3.4.0 (#4513) Bumps [github.com/launchdarkly/go-sdk-common/v3](https://github.com/launchdarkly/go-sdk-common) from 3.2.0 to 3.4.0. - [Release notes](https://github.com/launchdarkly/go-sdk-common/releases) - [Changelog](https://github.com/launchdarkly/go-sdk-common/blob/v3/CHANGELOG.md) - [Commits](https://github.com/launchdarkly/go-sdk-common/compare/v3.2.0...v3.4.0) --- updated-dependencies: - dependency-name: github.com/launchdarkly/go-sdk-common/v3 dependency-version: 3.4.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Rename command to 'mpg' and remove alias (#4514) * Bump the aws-sdk group with 3 updates (#4515) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.30.2 to 1.30.3 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.30.2...v1.30.3) Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.18.2 to 1.18.3 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.18.3/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.18.2...config/v1.18.3) Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.85.1 to 1.86.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.85.1...service/s3/v1.86.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-version: 1.30.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/credentials dependency-version: 1.18.3 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3 dependency-version: 1.86.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * test: "fly wireguard create" should create _peer.internal entry (#4510) The issue was fixed on the platform side, but preflight is a good place to make sure that the feature is working. * Tweak doc publishing + make sync-docs a standalone workflow (#4517) * Default to MPG in compatible regions and encourage it when it makes sense (#4505) * default to MPG when you're setting up in a valid MPG region * nudge the app over to the selected MPG region if they did in fact move to an MPG region * Update postgres launch descriptions * default in less-breaking ways depending on whether interactivity allows adjustment, remove unnecessarily chatty warnings * Add an interactive `fly ips allocate` command (#4518) * Add an interactive `fly ips allocate` command * Update certs command instructions for allocate command * Update test * Tweak * Tweak again... * Fixup error message * Update command short text * Action didn't catch the release trigger — changing to match release.yml (#4520) * Remove branch guard from sync docs since it triggers on tag (#4521) * Revert "Remove branch guard from sync docs since it triggers on tag (… (#4522) Revert "Remove branch guard from sync docs since it triggers on tag (#4521)" This reverts commit 5a401dfda23a966096dc5d4d062537b9b4069195. * Okay last change — don't need to run sync docs on tag at all. (#4523) * build: upgrade Go to 1.24.5 (#4519) There are small tweaks since 1.24 is more strict about fmt.Printf and its family. https://tip.golang.org/doc/go1.24#vet > The existing printf analyzer now reports a diagnostic for calls of > the form fmt.Printf(s), where s is a non-constant format string, > with no other arguments. * Bump github.com/depot/depot-go from 0.5.0 to 0.5.1 (#4516) Bumps [github.com/depot/depot-go](https://github.com/depot/depot-go) from 0.5.0 to 0.5.1. 
- [Release notes](https://github.com/depot/depot-go/releases) - [Commits](https://github.com/depot/depot-go/compare/v0.5.0...v0.5.1) --- updated-dependencies: - dependency-name: github.com/depot/depot-go dependency-version: 0.5.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * buildkit builder (#4489) * add support for buildkit builder Uses an external buildkit daemon as a build strategy when the `--buildkit-addr` flag (or `BUILDKIT_ADDR` env) is set. If a direct connection fails, attempt to connect through the target app's private network using a Wireguard agent. * Add MPG 'starter' plan (#4527) * Bump github.com/go-viper/mapstructure/v2 from 2.3.0 to 2.4.0 (#4529) Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.3.0 to 2.4.0. - [Release notes](https://github.com/go-viper/mapstructure/releases) - [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md) - [Commits](https://github.com/go-viper/mapstructure/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: github.com/go-viper/mapstructure/v2 dependency-version: 2.4.0 dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * add registry auth for internal mirror (#4530) * make --push work with depot (#4532) See https://community.fly.io/t/how-to-build-and-push-docker-image-without-deploying/25746 * ci: skip "make" correctly (#4511) It has been broken since #4013. Building flyctl takes 1.5 minutes nowadays. So skipping the step will shorten the feedback cycle. * detect if we're using compatible tokens for MPG commands (#4534) * detect if we're using compatible tokens for MPG commands * Bump github.com/vektah/gqlparser/v2 from 2.5.26 to 2.5.30 (#4524) Bumps [github.com/vektah/gqlparser/v2](https://github.com/vektah/gqlparser) from 2.5.26 to 2.5.30. - [Release notes](https://github.com/vektah/gqlparser/releases) - [Commits](https://github.com/vektah/gqlparser/compare/v2.5.26...v2.5.30) --- updated-dependencies: - dependency-name: github.com/vektah/gqlparser/v2 dependency-version: 2.5.30 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * set fly_builder_id metadata from remote builder (#4533) * set fly_builder_id metadata on machines created from a remote builder image this metadata can be used to pull the built image directly from the source host. * set builder ID from buildkit hostname * add fly_builder_id to new machine launch input * Remove the ability to provision Enveloop extension (#4537) Remove the ability to provision Enveloop * feat: provision a Buildkit-based builder when -buildkit-image is specified (#4526) * feat: provision a Buildkit-based builder when -buildkit-image is specified In addition to using an existing builder via -buildkit-addr, this change adds -buildkit-image, which provisions a new builder with the specified image. Eventually the image should be coming from the server-side, but we are not there yet. * fix: update the help text of -buildkit-image * refactor: use buildkitGRPCPort instead of hard-coding We may not change the port though. 
* feat: enable autostart and autostop=stop * fix: validateBuilderMachine returns a machine in an error case * fix: pass these parameters as like other EnsureBuilder calls * fix: typo * Bump the tracing group with 6 updates (#4542) Bumps the tracing group with 6 updates: | Package | From | To | | --- | --- | --- | | [go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp](https://github.com/open-telemetry/opentelemetry-go-contrib) | `0.62.0` | `0.63.0` | | [go.opentelemetry.io/otel](https://github.com/open-telemetry/opentelemetry-go) | `1.37.0` | `1.38.0` | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://github.com/open-telemetry/opentelemetry-go) | `1.37.0` | `1.38.0` | | [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](https://github.com/open-telemetry/opentelemetry-go) | `1.37.0` | `1.38.0` | | [go.opentelemetry.io/otel/sdk](https://github.com/open-telemetry/opentelemetry-go) | `1.37.0` | `1.38.0` | | [go.opentelemetry.io/otel/trace](https://github.com/open-telemetry/opentelemetry-go) | `1.37.0` | `1.38.0` | Updates `go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp` from 0.62.0 to 0.63.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go-contrib/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go-contrib/compare/zpages/v0.62.0...zpages/v0.63.0) Updates `go.opentelemetry.io/otel` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...v1.38.0) Updates `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...v1.38.0) Updates `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...v1.38.0) Updates `go.opentelemetry.io/otel/sdk` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...v1.38.0) Updates `go.opentelemetry.io/otel/trace` from 1.37.0 to 1.38.0 - [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases) - [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...v1.38.0) --- updated-dependencies: - dependency-name: go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp dependency-version: 0.63.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/exporters/stdout/stdouttrace dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/sdk dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing - dependency-name: go.opentelemetry.io/otel/trace dependency-version: 1.38.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: tracing ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * feat: --buildkit flag and buildkit-builder improvements (#4541) * feat: --buildkit flag and buildkit-builder improvements * run ensureBuilder only after connection failure to minimize happy-path latency * add --buildkit flag which uses a default buildkit image for the builder app * mark, --buildkit-addr, --buildkit-image, --builder-pool flags hidden * add 'Updating remote builder' progress message for ensureBuilder * Bump github.com/spf13/pflag from 1.0.6 to 1.0.9 (#4543) Bumps [github.com/spf13/pflag](https://github.com/spf13/pflag) from 1.0.6 to 1.0.9. - [Release notes](https://github.com/spf13/pflag/releases) - [Commits](https://github.com/spf13/pflag/compare/v1.0.6...v1.0.9) --- updated-dependencies: - dependency-name: github.com/spf13/pflag dependency-version: 1.0.9 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * ci: upgrade golangci-lint and enable misspell linter (#4540) * ci: upgrade golangci-lint * refactor: apply `golangci-lint run --fix` * refactor: manually address some lint issues * fix: fix misspelled words I'm not the only one :) * Enable debug logging on the tagging action (#4547) * Bump the aws-sdk group with 3 updates (#4546) Bumps the aws-sdk group with 3 updates: [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2), [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2). 
Updates `github.com/aws/aws-sdk-go-v2/config` from 1.30.3 to 1.31.6
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.30.3...config/v1.31.6)
Updates `github.com/aws/aws-sdk-go-v2/credentials` from 1.18.3 to 1.18.10
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.18.3...config/v1.18.10)
Updates `github.com/aws/aws-sdk-go-v2/service/s3` from 1.86.0 to 1.87.3
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.86.0...service/s3/v1.87.3)
---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/config
  dependency-version: 1.31.6
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: aws-sdk
- dependency-name: github.com/aws/aws-sdk-go-v2/credentials
  dependency-version: 1.18.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: aws-sdk
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.87.3
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: aws-sdk
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* fix: NewResolver must have a provisioner (#4551)
It may make sense to have this parameter mandatory later. Fixes #4548.
* Use cli not api for tagging new versions (#4553)
* Add flyctl sftp put command and directory support (#4558)
* Add flyctl sftp put command and directory support for get/put
This adds comprehensive SFTP functionality to flyctl:
- **New `flyctl sftp put` command**: Upload files to remote VMs with configurable permissions via -m/--mode flag (default: 0644)
- **Directory support**: Both get and put now support recursive directory operations with -R/--recursive flag
- `flyctl sftp get -R `: Downloads directories as local directory structure (uses temporary ZIP internally for atomic transfer)
- `flyctl sftp put -R `: Uploads entire directory trees
- **Safety features**: Prevents overwriting existing files/directories
- **Progress feedback**: Shows file-by-file progress during operations
- **Backwards compatible**: All existing usage patterns continue to work
- **Consistent arguments**: Both commands follow ` [destination]` pattern
The implementation reuses existing patterns from the interactive shell mode and follows the same safety-first approach as other flyctl commands.
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude
* Flyctl will now use the machines API for managing app secrets. (#4535)
The `fly secrets` commands will use the machines API for managing secrets and will track the returned petsem min_version for each app in the fly config file. It will use this min_version when fetching secrets or performing deploys to ensure that the latest secrets are used.
* eliminate graphql calls for SetSecrets, GetSecrets, UnsetSecrets and replace them with flaps calls using a new appsecrets library.
* fill min MinSecretsVersion when launching machines by adding minvers to launch input structure * delete minvers when we delete an app * populate testing context with a config file directory, now needed for getting minvers from config files * use latest fly-go from tagged version * fix: refresh buildkit credentials (#4557) fixes #4556. * deploy: skip regional IPs in DNS check (#4560) Regional IPs aren't returned by our public DNS, so the check always fails for apps with regional IPs. Make sure we only count global IPs here. * Update Deno version and Alpine base image (#4559) * Bump github.com/mark3labs/mcp-go from 0.31.0 to 0.39.1 (#4554) Bumps [github.com/mark3labs/mcp-go](https://github.com/mark3labs/mcp-go) from 0.31.0 to 0.39.1. - [Release notes](https://github.com/mark3labs/mcp-go/releases) - [Commits](https://github.com/mark3labs/mcp-go/compare/v0.31.0...v0.39.1) --- updated-dependencies: - dependency-name: github.com/mark3labs/mcp-go dependency-version: 0.39.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix: add builder ID to Depot image build metadata (#4561) * fix: remove installers (#4562) We still use the same installer scripts, but superfly/flyctl is no longer the source of truth. * bluegreen: Yeet custom top-level checks for bluegreen deployments (#4565) Recent rework of cordoning has removed the need for these, I think. * secrets: allow to disable DNS checks for secrets operations (#4566) DNS checks verify that the IP addresses assigned to an app have propagated to some known public resolver. In some environment the checks might fail, which adds significant latency to the otherwise fast operations like 'secrets set'/'secrets unset'. Allow the checks to be disabled with '--dns-checks=false' flag like we do for 'flyctl deploy'. Closes #3992 * Revert "bluegreen: Yeet custom top-level checks for bluegreen deploym… (#4567) Revert "bluegreen: Yeet custom top-level checks for bluegreen deployments (#4565)" This reverts commit e0004d0149cadef3950f6700157fc0017d149060. * build a flaps client in the context before setting secrets (#4569) the secrets api needs a flaps client in the context, so add it before calling the api. * bluegreen: Yeet custom top-level checks during bluegreen deploys, take 2 (#4568) * bluegreen: Yeet custom top-level checks during bluegreen deploys, take 2 We used to generate bg_deployments_* top-level checks for every service-level check defined for the app, because cordoned machines didn't have their health checks registered before. This is no longer the case as we have reworked how cordoning works. However just removing these services won't work because we're only checking top-level checks during bluegreen deployments. This is now also fixed so that we are actually looking at all checks, including service-level ones, while bluegreen deployments are in progress. * Count service-level checks as well for bluegreen ...since we can do it now * Bump fly-go dependency to v0.1.54 (#4574) * Bump fly-go dependency to v0.1.54 This includes a bunch of changes but most relevant for me here is that it removes bg_deployments_compat checks generated by flaps; see https://github.com/superfly/fly-go/commit/e2fc7e015a5943359a568d8e079125534ca794c8 * Tidy go.sum * test: test "fly console --dockerfile" (#4552) This change is a follow-up for #4551. The --dockerfile flag was not tested and we broke that. 
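To make the secrets min_version bookkeeping described above concrete, here is a rough, self-contained sketch of the idea: after a secrets mutation the Machines API reports a new version, flyctl records it per app, and later fetches and deploys insist on at least that version so they never observe stale secrets. The types, field names, and file layout below are illustrative assumptions; the real logic lives in the new internal/appsecrets package.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// minVersions maps app name -> lowest acceptable secrets version,
// persisted in a small JSON file next to the flyctl config (illustrative layout).
type minVersions map[string]uint64

func load(path string) (minVersions, error) {
	m := minVersions{}
	data, err := os.ReadFile(path)
	if os.IsNotExist(err) {
		return m, nil
	} else if err != nil {
		return nil, err
	}
	return m, json.Unmarshal(data, &m)
}

func (m minVersions) save(path string) error {
	data, err := json.MarshalIndent(m, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, data, 0o600)
}

func main() {
	path := filepath.Join(os.TempDir(), "flyctl-secrets-minvers.json")
	vers, err := load(path)
	if err != nil {
		panic(err)
	}

	// Pretend a secrets mutation just ran and the API reported version 42;
	// remember it so later deploys and fetches can insist on at least that version.
	vers["my-app"] = 42
	if err := vers.save(path); err != nil {
		panic(err)
	}

	fmt.Printf("a deploy of my-app would request secrets version >= %d\n", vers["my-app"])
}
```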
* Honor --yes flag for `fly storage update` command! (#4572) * Assume confirm is true if --yes flag passed * run gofmt -w filename to fix code format * Show certificate validation errors in the CLI (#4570) * Show certificate validation errors in the CLI * tidy * Add database flag to mpg connect command (#4577) * Add a command for synching flyctl to the current secrets minversion for an app (#4576) * Add a command for synching flyctl to the current secrets minversion for an app * fix(test): Relax destroy-ing volume state check (#4580) relax destroy-ing volume state test * Add --scheduled-snapshots to `volume create` (#4579) * Roll out zstd image compression with LaunchDarkly (#4573) This has already been implemented before, now it's time to roll it out as the default. * Bump rack from 2.2.14 to 2.2.18 in /test/preflight/fixtures/example-buildpack (#4575) Bump rack in /test/preflight/fixtures/example-buildpack Bumps [rack](https://github.com/rack/rack) from 2.2.14 to 2.2.18. - [Release notes](https://github.com/rack/rack/releases) - [Changelog](https://github.com/rack/rack/blob/main/CHANGELOG.md) - [Commits](https://github.com/rack/rack/compare/v2.2.14...v2.2.18) --- updated-dependencies: - dependency-name: rack dependency-version: 2.2.18 dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump the golangx group with 8 updates (#4563) Bumps the golangx group with 8 updates: | Package | From | To | | --- | --- | --- | | [golang.org/x/crypto](https://github.com/golang/crypto) | `0.41.0` | `0.42.0` | | [golang.org/x/mod](https://github.com/golang/mod) | `0.27.0` | `0.28.0` | | [golang.org/x/net](https://github.com/golang/net) | `0.43.0` | `0.44.0` | | [golang.org/x/sync](https://github.com/golang/sync) | `0.16.0` | `0.17.0` | | [golang.org/x/sys](https://github.com/golang/sys) | `0.35.0` | `0.36.0` | | [golang.org/x/term](https://github.com/golang/term) | `0.34.0` | `0.35.0` | | [golang.org/x/text](https://github.com/golang/text) | `0.28.0` | `0.29.0` | | [golang.org/x/time](https://github.com/golang/time) | `0.12.0` | `0.13.0` | Updates `golang.org/x/crypto` from 0.41.0 to 0.42.0 - [Commits](https://github.com/golang/crypto/compare/v0.41.0...v0.42.0) Updates `golang.org/x/mod` from 0.27.0 to 0.28.0 - [Commits](https://github.com/golang/mod/compare/v0.27.0...v0.28.0) Updates `golang.org/x/net` from 0.43.0 to 0.44.0 - [Commits](https://github.com/golang/net/compare/v0.43.0...v0.44.0) Updates `golang.org/x/sync` from 0.16.0 to 0.17.0 - [Commits](https://github.com/golang/sync/compare/v0.16.0...v0.17.0) Updates `golang.org/x/sys` from 0.35.0 to 0.36.0 - [Commits](https://github.com/golang/sys/compare/v0.35.0...v0.36.0) Updates `golang.org/x/term` from 0.34.0 to 0.35.0 - [Commits](https://github.com/golang/term/compare/v0.34.0...v0.35.0) Updates `golang.org/x/text` from 0.28.0 to 0.29.0 - [Release notes](https://github.com/golang/text/releases) - [Commits](https://github.com/golang/text/compare/v0.28.0...v0.29.0) Updates `golang.org/x/time` from 0.12.0 to 0.13.0 - [Commits](https://github.com/golang/time/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.42.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/mod dependency-version: 0.28.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - 
dependency-name: golang.org/x/net dependency-version: 0.44.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sync dependency-version: 0.17.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/sys dependency-version: 0.36.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/term dependency-version: 0.35.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/text dependency-version: 0.29.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx - dependency-name: golang.org/x/time dependency-version: 0.13.0 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: golangx ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * fix(panic): check Build section is not nil (#4585) * Remove enveloop (#4578) Remove enveloop extension * Configure scheduled_snapshots via fly.toml and flyctl launch (#4586) * Configure scheduled_snapshots via fly.toml * Configure scheduled_snapshots via flyctl launch * use the latest fly-go to use `version` instead of `Version` in flaps secret API. (#4590) Flaps changed the secrets version to be returned in `version` instead of `Version`. Fly-go was updated to use this field. This commit makes flyctl use this latest version. * test: use EventuallyWithT methods correctly (#4583) The inner function should use assert.CollectT to keep the loop running. * fix: avoid client.GetOrganizationBySlug which queries too much fields (#4584) stage.Org() calls client.GetOrganizationBySlug() which queries too much fields, and times out occasionally. This change adds stateCompact() which queries less and more reliable. * fix: upgrade sinatra to clear GHSA-hxx2-7vcw-mqr3 (#4582) We are not affected, but it is better to clear the Dependabot alert. https://github.com/advisories/GHSA-hxx2-7vcw-mqr3 * Improve output of `volume snapshot list` (#4587) * Bump fly-go * Sort snapshots from oldest to newest This makes it easier to understand the incremental stored sizes * Add Volume Size column * Right-align numeric columns * Show total stored size * Stop using GraphQL API It calls the same underlying API * refactor: use fmt.Fprintf instad of fmt.Sprintf (#4581) * refactor: bye golang.org/x/exp (#3944) refactor: remove golang.org/x/exp Both maps and slices are now in Go itself. * refactor: bye gopkg.in/yaml.v2 (#4591) We don't have to use both gopkg.in/yaml.v3 and gopkg.in/yaml.v2. 
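As a small illustration of the fmt.Fprintf refactor a couple of entries above: formatting straight into a writer avoids building a throwaway string first. The writer and message below are made up for the example.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	name, machines := "my-app", 3

	// Before: format into an intermediate string, then write it.
	s := fmt.Sprintf("%s has %d machines\n", name, machines)
	os.Stdout.WriteString(s)

	// After: format directly into the writer, no intermediate allocation.
	fmt.Fprintf(os.Stdout, "%s has %d machines\n", name, machines)
}
```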
* Remove unused opencontainers/image-spec dependency * fix pre commit * fix dupe flag and go mod tidy * add missing flags * missing flags * more missing flags * more envs --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Somtochi Onyekwere Co-authored-by: mattvr <4052466+mattvr@users.noreply.github.com> Co-authored-by: Will Jordan Co-authored-by: Daniel Graña Co-authored-by: George Garside <191085+grgar@users.noreply.github.com> Co-authored-by: Adil Ansari Co-authored-by: Kazuyoshi Kato Co-authored-by: Sam Ruby Co-authored-by: asib Co-authored-by: Joshua Sierles Co-authored-by: Jacob Fenton Co-authored-by: Saleem Rashid Co-authored-by: lea Co-authored-by: Addison LeClair Co-authored-by: Tim <165851289+timflyio@users.noreply.github.com> Co-authored-by: Petr Gazarov Co-authored-by: Injoong Yoon <27440940+injoongy@users.noreply.github.com> Co-authored-by: Injoong Yoon <27440940+AttilaTheHen@users.noreply.github.com> Co-authored-by: mjbraun Co-authored-by: Jon Phenow Co-authored-by: Claude Co-authored-by: Alexey Makhov Co-authored-by: Simon Horne Co-authored-by: Liam Bigelow <40188355+bglw@users.noreply.github.com> Co-authored-by: Kaelyn <87516931+Lucais11@users.noreply.github.com> Co-authored-by: Kyle McLaren Co-authored-by: swilson-fly <123485058+swilson-fly@users.noreply.github.com> Co-authored-by: Christopher Louvet Co-authored-by: Pavel Borzenkov Co-authored-by: Amogh Lele Co-authored-by: Peter Cai Co-authored-by: Kathryn Tan <33826441+KTanAug21@users.noreply.github.com> Co-authored-by: Rian McGuire Co-authored-by: adam-fly-io --- .github/workflows/auto-release.yml | 31 +- .github/workflows/build.yml | 3 +- .github/workflows/checks.yml | 8 +- .github/workflows/deployer-tests.yml | 14 +- .github/workflows/preflight.yml | 4 +- .github/workflows/preflight_gpu.yml | 113 -- .github/workflows/sync-docs.yml | 41 + .github/workflows/test_install.yml | 126 -- .gitignore | 3 +- .golangci.yml | 185 +- .goreleaser.2.yml | 9 + .goreleaser.yml | 9 + .pre-commit-config.yaml | 32 +- Dockerfile | 2 +- Dockerfile.dev | 2 +- Dockerfile.mcp | 5 + README.md | 6 - agent/client.go | 41 +- agent/remote.go | 42 + deploy.rb | 1 + deployer.Dockerfile | 6 +- deployer.Dockerfile.dockerignore | 2 +- doc/main.go | 16 +- flyctl/config.go | 1 + flyctl/flyctl.go | 4 +- flypg/launcher.go | 27 +- go.mod | 250 +-- go.sum | 547 +++--- gql/generated.go | 101 +- gql/genqclient.graphql | 7 +- gql/schema.graphql | 544 +++--- installers/install.ps1 | 96 - installers/install.sh | 59 - internal/appconfig/config.go | 130 +- internal/appconfig/context.go | 15 + internal/appconfig/definition_test.go | 15 +- internal/appconfig/machines.go | 43 +- internal/appconfig/patches.go | 5 + internal/appconfig/serde.go | 28 +- internal/appconfig/serde_test.go | 94 +- internal/appconfig/service.go | 12 +- internal/appconfig/setters.go | 38 +- internal/appconfig/setters_test.go | 8 +- internal/appconfig/strict_validate.go | 272 +++ internal/appconfig/strict_validate_test.go | 197 ++ .../testdata/compose-autodetect.toml | 4 + internal/appconfig/testdata/compose.toml | 4 + .../appconfig/testdata/full-reference.toml | 1 + internal/appconfig/validation.go | 118 +- internal/appsecrets/minvers.go | 64 + internal/appsecrets/secrets.go | 62 + internal/build/imgsrc/buildkit.go | 32 +- internal/build/imgsrc/buildkit_builder.go | 200 ++ internal/build/imgsrc/depot.go | 125 +- internal/build/imgsrc/depot_test.go | 22 + internal/build/imgsrc/docker.go | 82 +- 
internal/build/imgsrc/docker_test.go | 36 +- internal/build/imgsrc/dockerfile_builder.go | 19 +- internal/build/imgsrc/ensure_builder.go | 367 ++-- internal/build/imgsrc/ensure_builder_test.go | 78 +- internal/build/imgsrc/flaps_mock_test.go | 706 +++++++ internal/build/imgsrc/nixpacks_builder.go | 11 +- .../build/imgsrc/remote_image_resolver.go | 15 +- internal/build/imgsrc/resolver.go | 96 +- internal/build/imgsrc/resolver_test.go | 17 + internal/buildinfo/buildinfo.go | 4 +- internal/certificate/errors.go | 29 + internal/cli/cli.go | 3 +- internal/cmdutil/preparers/preparers.go | 2 +- internal/command/apps/apps.go | 2 +- internal/command/apps/create.go | 36 + internal/command/apps/destroy.go | 5 +- internal/command/apps/move.go | 15 +- internal/command/auth/docker.go | 9 +- internal/command/auth/webauth/webauth.go | 29 +- internal/command/certificates/root.go | 368 ++-- internal/command/command.go | 20 + internal/command/command_run.go | 45 +- internal/command/config/env.go | 23 +- internal/command/config/validate.go | 56 +- internal/command/console/console.go | 23 +- internal/command/consul/attach.go | 9 +- internal/command/consul/detach.go | 11 +- internal/command/deploy/deploy.go | 44 +- internal/command/deploy/deploy_build.go | 57 +- internal/command/deploy/deploy_build_test.go | 33 +- internal/command/deploy/deploy_first.go | 1 + internal/command/deploy/machinebasedtest.go | 27 +- internal/command/deploy/machines.go | 10 +- .../deploy/machines_deploymachinesapp.go | 52 +- .../command/deploy/machines_launchinput.go | 100 +- .../deploy/machines_launchinput_test.go | 8 +- .../command/deploy/machines_releasecommand.go | 117 +- internal/command/deploy/machines_test.go | 25 +- internal/command/deploy/mock_client_test.go | 37 +- internal/command/deploy/plan.go | 45 +- internal/command/deploy/plan_test.go | 14 +- internal/command/deploy/statics/addon.go | 5 +- internal/command/deploy/statics/move.go | 2 +- internal/command/deploy/strategy_bluegreen.go | 58 +- .../command/deploy/strategy_bluegreen_test.go | 1 + internal/command/extensions/arcjet/create.go | 6 +- internal/command/extensions/arcjet/destroy.go | 2 +- internal/command/extensions/core/core.go | 36 +- .../command/extensions/enveloop/create.go | 65 - .../command/extensions/enveloop/dashboard.go | 43 - .../command/extensions/enveloop/destroy.go | 72 - .../command/extensions/enveloop/enveloop.go | 21 - internal/command/extensions/enveloop/list.go | 53 - .../command/extensions/enveloop/status.go | 65 - internal/command/extensions/extensions.go | 4 - .../command/extensions/fly_mysql/create.go | 6 +- .../command/extensions/fly_mysql/destroy.go | 2 +- internal/command/extensions/kafka/create.go | 69 - .../command/extensions/kafka/dashboard.go | 43 - internal/command/extensions/kafka/destroy.go | 72 - internal/command/extensions/kafka/kafka.go | 21 - internal/command/extensions/kafka/list.go | 53 - internal/command/extensions/kafka/status.go | 65 - internal/command/extensions/kafka/update.go | 51 - .../command/extensions/kubernetes/destroy.go | 2 +- .../extensions/kubernetes/kubeconfig.go | 5 +- internal/command/extensions/sentry/create.go | 6 +- internal/command/extensions/sentry/destroy.go | 2 +- .../command/extensions/supabase/create.go | 115 -- .../command/extensions/supabase/destroy.go | 2 +- .../command/extensions/supabase/supabase.go | 2 +- internal/command/extensions/tigris/create.go | 11 +- internal/command/extensions/tigris/destroy.go | 2 +- internal/command/extensions/tigris/status.go | 4 +- 
internal/command/extensions/tigris/update.go | 17 +- internal/command/extensions/vector/create.go | 8 +- internal/command/extensions/vector/destroy.go | 2 +- internal/command/extensions/vector/status.go | 2 +- internal/command/extensions/wafris/create.go | 6 +- internal/command/extensions/wafris/destroy.go | 2 +- internal/command/image/show.go | 2 +- internal/command/image/update.go | 2 +- internal/command/image/update_machines.go | 42 +- internal/command/ips/allocate_interactive.go | 274 +++ internal/command/ips/ips.go | 1 + internal/command/ips/private.go | 19 +- internal/command/launch/cmd.go | 110 +- internal/command/launch/cmd_test.go | 234 +++ internal/command/launch/deploy.go | 10 +- internal/command/launch/describe_plan.go | 53 +- internal/command/launch/dockerfiles.go | 2 +- internal/command/launch/launch.go | 188 +- internal/command/launch/launch_databases.go | 220 ++- internal/command/launch/launch_extensions.go | 6 +- internal/command/launch/launch_frameworks.go | 44 +- internal/command/launch/plan/postgres.go | 204 +- internal/command/launch/plan/postgres_test.go | 362 ++++ internal/command/launch/plan_builder.go | 39 +- internal/command/launch/plan_commands.go | 31 +- internal/command/launch/sessions.go | 2 +- internal/command/launch/sourceinfo.go | 5 +- internal/command/launch/state.go | 15 +- internal/command/launch/webui.go | 54 +- internal/command/machine/clone.go | 15 +- internal/command/machine/machine.go | 1 + internal/command/machine/place.go | 148 ++ internal/command/machine/run.go | 115 +- internal/command/machine/select.go | 2 +- internal/command/machine/status.go | 2 +- internal/command/machine/update.go | 27 +- internal/command/mcp/config.go | 736 ++++++++ internal/command/mcp/destroy.go | 138 ++ internal/command/mcp/launch.go | 448 +++++ internal/command/mcp/list.go | 170 ++ internal/command/mcp/logs.go | 85 + internal/command/mcp/mcp.go | 58 + internal/command/mcp/proxy.go | 376 ++++ internal/command/mcp/proxy/passthru.go | 214 +++ internal/command/mcp/proxy/relay.go | 273 +++ internal/command/mcp/proxy/types.go | 12 + internal/command/mcp/server.go | 494 +++++ internal/command/mcp/server/apps.go | 220 +++ internal/command/mcp/server/certs.go | 162 ++ internal/command/mcp/server/ips.go | 213 +++ internal/command/mcp/server/logs.go | 42 + internal/command/mcp/server/machine.go | 1669 +++++++++++++++++ internal/command/mcp/server/orgs.go | 167 ++ internal/command/mcp/server/platform.go | 38 + internal/command/mcp/server/secrets.go | 125 ++ internal/command/mcp/server/status.go | 24 + internal/command/mcp/server/types.go | 34 + internal/command/mcp/server/volumes.go | 426 +++++ internal/command/mcp/volume.go | 139 ++ internal/command/mcp/wrap.go | 397 ++++ internal/command/mpg/attach.go | 117 ++ internal/command/mpg/connect.go | 89 + internal/command/mpg/create.go | 276 +++ internal/command/mpg/destroy.go | 81 + internal/command/mpg/list.go | 93 + internal/command/mpg/mpg.go | 307 +++ internal/command/mpg/mpg_test.go | 932 +++++++++ internal/command/mpg/plans.go | 36 + internal/command/mpg/proxy.go | 146 ++ internal/command/mpg/status.go | 86 + internal/command/orgs/create.go | 4 +- internal/command/platform/regions.go | 85 +- internal/command/platform/vmsizes.go | 48 +- internal/command/postgres/add_flycast.go | 14 +- internal/command/postgres/attach.go | 11 +- internal/command/postgres/backup.go | 32 +- internal/command/postgres/barman.go | 17 +- internal/command/postgres/config_update.go | 2 +- internal/command/postgres/create.go | 51 +- 
internal/command/postgres/detach.go | 34 +- internal/command/postgres/failover.go | 9 +- internal/command/postgres/import.go | 32 +- internal/command/postgres/postgres.go | 84 +- internal/command/postgres/renew_certs.go | 11 +- internal/command/redis/attach.go | 8 +- internal/command/redis/create.go | 8 +- internal/command/redis/destroy.go | 2 +- internal/command/redis/status.go | 4 +- internal/command/root/root.go | 4 + internal/command/scale/count.go | 4 +- internal/command/scale/count_machines.go | 14 +- internal/command/scale/machine_defaults.go | 2 +- internal/command/scale/machines.go | 13 +- internal/command/scale/vm.go | 7 +- internal/command/secrets/deploy.go | 27 +- internal/command/secrets/import.go | 13 +- internal/command/secrets/key_delete.go | 17 +- internal/command/secrets/key_set.go | 18 +- internal/command/secrets/keys.go | 41 +- internal/command/secrets/keys_common.go | 16 +- internal/command/secrets/keys_list.go | 18 +- internal/command/secrets/list.go | 16 +- internal/command/secrets/parser.go | 19 +- internal/command/secrets/parser_test.go | 20 + internal/command/secrets/secrets.go | 34 +- internal/command/secrets/set.go | 21 +- internal/command/secrets/sync.go | 47 + internal/command/secrets/unset.go | 22 +- internal/command/ssh/connect.go | 31 +- internal/command/ssh/console.go | 135 +- internal/command/ssh/sftp.go | 364 +++- internal/command/ssh/ssh_terminal.go | 2 +- internal/command/status/machines.go | 4 +- internal/command/version/upgrade.go | 2 +- internal/command/volumes/create.go | 16 + internal/command/volumes/fork.go | 6 + internal/command/volumes/lsvd/setup.go | 25 +- internal/command/volumes/snapshots/list.go | 48 +- internal/command/wireguard/root.go | 6 + internal/command/wireguard/wireguard.go | 3 +- internal/config/config.go | 17 +- internal/config/file.go | 15 +- internal/config/machine.go | 144 ++ internal/config/tokens.go | 25 +- internal/containerconfig/compose.go | 408 ++++ internal/containerconfig/compose_test.go | 541 ++++++ internal/containerconfig/parse.go | 71 + internal/containerconfig/testdata/nginx.conf | 10 + internal/flag/context.go | 21 + internal/flag/flag.go | 51 +- internal/flag/flagnames/constants.go | 6 + internal/flag/validation/compression.go | 33 + internal/flapsutil/flaps_client.go | 12 +- internal/flapsutil/helpers.go | 37 + internal/flyutil/client.go | 6 +- internal/haikunator/haikunator.go | 2 +- internal/incidents/hosts.go | 42 +- internal/incidents/statuspage.go | 45 +- internal/inmem/client.go | 14 +- internal/inmem/flaps_client.go | 30 +- internal/launchdarkly/launchdarkly.go | 46 + internal/machine/leasable_machine.go | 70 +- internal/machine/machine_set.go | 46 +- internal/machine/update.go | 6 +- internal/metrics/synthetics/agent.go | 2 +- internal/mock/client.go | 11 +- internal/mock/flaps_client.go | 48 +- internal/render/render.go | 15 +- internal/set/set_test.go | 2 +- internal/tracing/tracing.go | 5 - internal/uiex/builders.go | 75 + internal/uiex/client.go | 67 + internal/uiex/managed_postgres.go | 377 ++++ internal/uiexutil/client.go | 36 + internal/uiexutil/uiexutil.go | 20 + internal/version/version_test.go | 2 +- iostreams/color.go | 37 +- iostreams/iostreams.go | 30 +- proxy/connect.go | 2 +- retry/retry.go | 49 - retry/retry_test.go | 138 -- scanner/jsFramework.go | 50 +- scanner/laravel.go | 2 +- scanner/node.go | 6 +- scanner/phoenix.go | 199 +- scanner/python.go | 1 + scanner/rails.go | 263 +-- scanner/rust.go | 19 +- scanner/scanner.go | 2 + scanner/templates/deno/Dockerfile | 4 +- 
scanner/templates/python/.dockerignore | 1 + scripts/preflight.sh | 2 +- scripts/publish_docs.sh | 6 +- ssh/client.go | 11 +- test/fixtures/bun-basic/index.ts | 2 +- test/fixtures/bun-basic/package.json | 2 +- test/fixtures/deno-no-config/index.ts | 2 +- .../.gitignore | 2 +- .../README.md | 14 +- .../entrypoint.sh | 2 +- .../fixtures/deploy-phoenix-sqlite/.gitignore | 1 - .../deploy-phoenix-sqlite/assets/js/app.js | 1 - .../config/credentials.yml.enc | 2 +- .../config/credentials.yml.enc | 2 +- .../deploy-rails-8/config/credentials.yml.enc | 2 +- test/fixtures/django-basic/mysite/urls.py | 2 +- test/fixtures/django-basic/polls/urls.py | 2 +- test/fixtures/django-basic/polls/views.py | 2 +- test/fixtures/static/index.html | 2 +- .../fixtures/example-buildpack/Gemfile | 2 +- .../fixtures/example-buildpack/Gemfile.lock | 29 +- test/preflight/fly_console_test.go | 40 +- test/preflight/fly_deploy_test.go | 10 +- test/preflight/fly_postgres_test.go | 16 +- test/preflight/fly_scale_test.go | 2 +- test/preflight/fly_volume_test.go | 19 +- test/preflight/fly_wireguard_test.go | 60 + test/testlib/test_env.go | 15 +- tools/distribute/bundle/meta.go | 2 +- tools/distribute/flypkgs/errors.go | 5 +- wg/ws.go | 29 +- 331 files changed, 19398 insertions(+), 4089 deletions(-) delete mode 100644 .github/workflows/preflight_gpu.yml create mode 100644 .github/workflows/sync-docs.yml delete mode 100644 .github/workflows/test_install.yml create mode 100644 Dockerfile.mcp create mode 100644 agent/remote.go delete mode 100644 installers/install.ps1 delete mode 100644 installers/install.sh create mode 100644 internal/appconfig/strict_validate.go create mode 100644 internal/appconfig/strict_validate_test.go create mode 100644 internal/appconfig/testdata/compose-autodetect.toml create mode 100644 internal/appconfig/testdata/compose.toml create mode 100644 internal/appsecrets/minvers.go create mode 100644 internal/appsecrets/secrets.go create mode 100644 internal/build/imgsrc/buildkit_builder.go create mode 100644 internal/build/imgsrc/depot_test.go create mode 100644 internal/build/imgsrc/flaps_mock_test.go create mode 100644 internal/certificate/errors.go delete mode 100644 internal/command/extensions/enveloop/create.go delete mode 100644 internal/command/extensions/enveloop/dashboard.go delete mode 100644 internal/command/extensions/enveloop/destroy.go delete mode 100644 internal/command/extensions/enveloop/enveloop.go delete mode 100644 internal/command/extensions/enveloop/list.go delete mode 100644 internal/command/extensions/enveloop/status.go delete mode 100644 internal/command/extensions/kafka/create.go delete mode 100644 internal/command/extensions/kafka/dashboard.go delete mode 100644 internal/command/extensions/kafka/destroy.go delete mode 100644 internal/command/extensions/kafka/kafka.go delete mode 100644 internal/command/extensions/kafka/list.go delete mode 100644 internal/command/extensions/kafka/status.go delete mode 100644 internal/command/extensions/kafka/update.go delete mode 100644 internal/command/extensions/supabase/create.go create mode 100644 internal/command/ips/allocate_interactive.go create mode 100644 internal/command/launch/cmd_test.go create mode 100644 internal/command/launch/plan/postgres_test.go create mode 100644 internal/command/machine/place.go create mode 100644 internal/command/mcp/config.go create mode 100644 internal/command/mcp/destroy.go create mode 100644 internal/command/mcp/launch.go create mode 100644 internal/command/mcp/list.go create mode 100644 
internal/command/mcp/logs.go create mode 100644 internal/command/mcp/mcp.go create mode 100644 internal/command/mcp/proxy.go create mode 100644 internal/command/mcp/proxy/passthru.go create mode 100644 internal/command/mcp/proxy/relay.go create mode 100644 internal/command/mcp/proxy/types.go create mode 100644 internal/command/mcp/server.go create mode 100644 internal/command/mcp/server/apps.go create mode 100644 internal/command/mcp/server/certs.go create mode 100644 internal/command/mcp/server/ips.go create mode 100644 internal/command/mcp/server/logs.go create mode 100644 internal/command/mcp/server/machine.go create mode 100644 internal/command/mcp/server/orgs.go create mode 100644 internal/command/mcp/server/platform.go create mode 100644 internal/command/mcp/server/secrets.go create mode 100644 internal/command/mcp/server/status.go create mode 100644 internal/command/mcp/server/types.go create mode 100644 internal/command/mcp/server/volumes.go create mode 100644 internal/command/mcp/volume.go create mode 100644 internal/command/mcp/wrap.go create mode 100644 internal/command/mpg/attach.go create mode 100644 internal/command/mpg/connect.go create mode 100644 internal/command/mpg/create.go create mode 100644 internal/command/mpg/destroy.go create mode 100644 internal/command/mpg/list.go create mode 100644 internal/command/mpg/mpg.go create mode 100644 internal/command/mpg/mpg_test.go create mode 100644 internal/command/mpg/plans.go create mode 100644 internal/command/mpg/proxy.go create mode 100644 internal/command/mpg/status.go create mode 100644 internal/command/secrets/sync.go create mode 100644 internal/config/machine.go create mode 100644 internal/containerconfig/compose.go create mode 100644 internal/containerconfig/compose_test.go create mode 100644 internal/containerconfig/parse.go create mode 100644 internal/containerconfig/testdata/nginx.conf create mode 100644 internal/flag/validation/compression.go create mode 100644 internal/flapsutil/helpers.go create mode 100644 internal/uiex/builders.go create mode 100644 internal/uiex/client.go create mode 100644 internal/uiex/managed_postgres.go create mode 100644 internal/uiexutil/client.go create mode 100644 internal/uiexutil/uiexutil.go delete mode 100644 retry/retry.go delete mode 100644 retry/retry_test.go create mode 100644 test/preflight/fly_wireguard_test.go diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 536f112dad..e6c2e26b9f 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -24,33 +24,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} WITH_V: true + VERBOSE: true DEFAULT_BUMP: "patch" - - sync_docs: - if: github.ref == 'refs/heads/master' - needs: release - runs-on: ubuntu-latest - steps: - - name: Checkout flyctl - uses: actions/checkout@v4 - - uses: jnwng/github-app-installation-token-action@778da2ab509f3ef5e4b902bc15daf25ef3e34939 - id: installationToken - with: - appId: 339363 - installationId: 37947271 - privateKey: ${{ secrets.DOCS_SYNCER_GITHUB_APP_PRIVATE_KEY }} - - name: Checkout docs - uses: actions/checkout@v4 - with: - repository: superfly/docs - token: ${{ steps.installationToken.outputs.token }} - path: docs - - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - check-latest: true - - name: Publish CLI docs - id: publish-cli-docs - env: - GITHUB_TOKEN: ${{ steps.installationToken.outputs.token }} - run: scripts/publish_docs.sh ${{ github.ref_name }} + GIT_API_TAGGING: false diff --git 
a/.github/workflows/build.yml b/.github/workflows/build.yml index b55ef9e2b8..1ddc4655d5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -39,9 +39,8 @@ jobs: needs: test_build uses: ./.github/workflows/preflight.yml secrets: inherit - + # deployer-tests: # needs: test_build # uses: ./.github/workflows/deployer-tests.yml # secrets: inherit - diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index 4754266fab..65b2929a73 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -34,8 +34,6 @@ jobs: extra_args: --all-files --hook-stage=manual lint: - strategy: - fail-fast: false name: lint runs-on: ubuntu-latest steps: @@ -43,8 +41,6 @@ jobs: - uses: actions/setup-go@v5 with: go-version-file: "go.mod" - # check-latest: true - - uses: golangci/golangci-lint-action@v6 + - uses: golangci/golangci-lint-action@v8 with: - version: v1.61.0 - working-directory: . + version: v2.4.0 diff --git a/.github/workflows/deployer-tests.yml b/.github/workflows/deployer-tests.yml index 9d5b034017..f987a5d526 100644 --- a/.github/workflows/deployer-tests.yml +++ b/.github/workflows/deployer-tests.yml @@ -1,7 +1,7 @@ name: Deployer tests on: push: - + jobs: build-deployer: runs-on: ubuntu-latest @@ -9,14 +9,14 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - + - uses: actions/setup-go@v5 with: go-version-file: "go.mod" check-latest: true - name: "Build flyctl" run: make build - + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Login to GitHub Container Registry @@ -25,14 +25,14 @@ jobs: registry: https://index.docker.io/v1/ username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - + - name: Upload flyctl for the deployer tests uses: actions/upload-artifact@v4 with: name: deployer-flyctl path: bin/flyctl overwrite: true - + - name: Build and push uses: docker/build-push-action@v6 with: @@ -42,7 +42,7 @@ jobs: tags: flyio/deployer:${{ github.sha }} cache-from: type=gha cache-to: type=gha,mode=max - + deployer-tests: needs: build-deployer runs-on: ubuntu-latest @@ -121,4 +121,4 @@ jobs: FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight FLY_PREFLIGHT_TEST_APP_PREFIX: "deployertest" run: | - ./scripts/delete_preflight_apps.sh "$FLY_PREFLIGHT_TEST_APP_PREFIX" \ No newline at end of file + ./scripts/delete_preflight_apps.sh "$FLY_PREFLIGHT_TEST_APP_PREFIX" diff --git a/.github/workflows/preflight.yml b/.github/workflows/preflight.yml index f4ef3d0b5d..6022a8ab7f 100644 --- a/.github/workflows/preflight.yml +++ b/.github/workflows/preflight.yml @@ -32,15 +32,17 @@ jobs: - name: Install gotesplit, set FLY_PREFLIGHT_TEST_APP_PREFIX run: | curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s - echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" + echo "FLY_PREFLIGHT_TEST_APP_PREFIX=gha-$GITHUB_RUN_ID-$GITHUB_RUN_ATTEMPT" >> "$GITHUB_ENV" # If this workflow is triggered by code changes (eg PRs), download the binary to save time. - uses: actions/download-artifact@v4 + id: download-flyctl with: name: flyctl path: master-build continue-on-error: true # But if this is a manual run, build the binary first. 
- run: make + if: steps.download-flyctl.outcome == 'failure' - name: Run preflight tests id: preflight env: diff --git a/.github/workflows/preflight_gpu.yml b/.github/workflows/preflight_gpu.yml deleted file mode 100644 index e54e6d92ff..0000000000 --- a/.github/workflows/preflight_gpu.yml +++ /dev/null @@ -1,113 +0,0 @@ -name: Preflight GPU Tests - -on: - schedule: - - cron: "30 0 * * *" - workflow_dispatch: - inputs: - reason: - description: Brief reason for running this workflow manually - required: false - default: User initiated run - type: string - workflow_call: - -jobs: - build: - runs-on: ubuntu-latest-m - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - check-latest: true - - name: "Place wintun.dll" - run: cp deps/wintun/bin/amd64/wintun.dll ./ - - name: build - uses: goreleaser/goreleaser-action@v5 - env: - BUILD_ENV: "development" - with: - version: latest - args: build --clean --snapshot --verbose - - name: Upload flyctl for preflight - uses: actions/upload-artifact@v4 - with: - name: flyctl - path: dist/default_linux_amd64_v1/flyctl - overwrite: true - - preflight-gpu-tests: - needs: build - if: ${{ github.repository == 'superfly/flyctl' }} - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - # Add "a10" to this list once capacity increases. - vm_size: ["a100-40gb", "l40s"] - parallelism: [20] - index: - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - check-latest: true - - name: Get go version - id: go-version - run: echo "name=version::$(go env GOVERSION)" >> $GITHUB_OUTPUT - - name: Install gotesplit, set FLY_PREFLIGHT_TEST_APP_PREFIX - run: | - curl -sfL https://raw.githubusercontent.com/Songmu/gotesplit/v0.2.1/install.sh | sh -s - echo "FLY_PREFLIGHT_TEST_APP_PREFIX=pf-gpu-gha-$(openssl rand -hex 4)" >> "$GITHUB_ENV" - - uses: actions/download-artifact@v4 - with: - name: flyctl - path: master-build - - name: Run preflight GPU tests - id: preflight - env: - FLY_PREFLIGHT_TEST_ACCESS_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} - FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight - # This VM size is only available in ORD. 
- FLY_PREFLIGHT_TEST_FLY_REGIONS: ord - FLY_PREFLIGHT_TEST_NO_PRINT_HISTORY_ON_FAIL: "true" - FLY_FORCE_TRACE: "true" - FLY_PREFLIGHT_TEST_VM_SIZE: ${{ matrix.vm_size }} - run: | - mv master-build/flyctl bin/flyctl - chmod +x bin/flyctl - export PATH=$PWD/bin:$PATH - echo -n failed= >> $GITHUB_OUTPUT - ./scripts/preflight.sh -r "${{ github.ref }}" -t "${{ matrix.parallelism }}" -i "${{ matrix.index }}" -o $GITHUB_OUTPUT - - name: Post failure to slack - if: ${{ github.ref == 'refs/heads/master' && failure() }} - uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d - with: - webhook: ${{ secrets.PREFLIGHT_SLACK_WEBHOOK_URL }} - webhook-type: incoming-webhook - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":sob: preflight GPU tests failed: ${{ steps.preflight.outputs.failed }} ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - } - } - ] - } - - uses: superfly/flyctl-actions/setup-flyctl@master - if: always() - - name: Clean up any un-deleted preflight apps - if: always() - env: - FLY_API_TOKEN: ${{ secrets.FLYCTL_PREFLIGHT_CI_FLY_API_TOKEN }} - FLY_PREFLIGHT_TEST_FLY_ORG: flyctl-ci-preflight - run: | - ./scripts/delete_preflight_apps.sh "$FLY_PREFLIGHT_TEST_APP_PREFIX" diff --git a/.github/workflows/sync-docs.yml b/.github/workflows/sync-docs.yml new file mode 100644 index 0000000000..da763c7041 --- /dev/null +++ b/.github/workflows/sync-docs.yml @@ -0,0 +1,41 @@ +name: Automatically sync docs for flyctl + +on: + workflow_run: + workflows: ["Automatically release a new version of flyctl"] + types: + - completed + + workflow_dispatch: + +permissions: + contents: write + +jobs: + sync_docs: + if: github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + steps: + - name: Checkout flyctl + uses: actions/checkout@v4 + - uses: jnwng/github-app-installation-token-action@778da2ab509f3ef5e4b902bc15daf25ef3e34939 + id: installationToken + with: + appId: 339363 + installationId: 37947271 + privateKey: ${{ secrets.DOCS_SYNCER_GITHUB_APP_PRIVATE_KEY }} + - name: Checkout docs + uses: actions/checkout@v4 + with: + repository: superfly/docs + token: ${{ steps.installationToken.outputs.token }} + path: docs + - uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + check-latest: true + - name: Publish CLI docs + id: publish-cli-docs + env: + GITHUB_TOKEN: ${{ steps.installationToken.outputs.token }} + run: scripts/publish_docs.sh ${{ github.ref_name }} diff --git a/.github/workflows/test_install.yml b/.github/workflows/test_install.yml deleted file mode 100644 index cc408f6cf0..0000000000 --- a/.github/workflows/test_install.yml +++ /dev/null @@ -1,126 +0,0 @@ -name: Test Installation - -on: - pull_request: - paths: - - '.github/workflows/test_install.yml' - - 'installers/**' - push: - branches: - - master - workflow_dispatch: - -jobs: - test-docker: - strategy: - fail-fast: false - max-parallel: 4 - matrix: - image: - - "ubuntu:20.04" - - "ubuntu:22.04" - - "ubuntu:23.04" - - "debian:oldstable-slim" - - "debian:stable-slim" - - "alpine:latest" - - "archlinux:latest" - - "amazonlinux:latest" - runs-on: ubuntu-latest - env: - SHELL: /bin/bash - # FLYCTL_INSTALL: /usr/local/bin - container: - image: ${{ matrix.image }} - options: --user root - steps: - - name: Install Dependencies (apt-get) - if: | - contains(matrix.image, 'ubuntu') || - contains(matrix.image, 'debian') - run: | - apt-get update && apt-get install -y curl - - name: Install Dependencies (apk) - if: | - 
contains(matrix.image, 'alpine') - run: | - apk --no-cache add curl - - name: Install Dependencies (yum) - if: | - contains(matrix.image, 'amazonlinux') - # actions/checkout needs tar & gzip, so install them too - run: yum install -y --allowerasing tar gzip curl - - name: checkout - uses: actions/checkout@v4 - - name: run installer - run: | - echo "$HOME/.fly/bin" >> $GITHUB_PATH - sh ./installers/install.sh - - name: check that `flyctl` binary is installed - run: flyctl version - - name: check that `fly` symlink is installed - run: fly version - # TODO[md]: this is currently broken on stable. restore once fixed. - # - name: Check that the install config was saved - # run: | - # grep -x -q "channel: stable" ~/.fly/state.yml - - test-native: - strategy: - fail-fast: false - max-parallel: 4 - matrix: - os: - # From https://github.com/actions/runner-images/tree/main?tab=readme-ov-file#available-images. - - ubuntu-24.04 - - ubuntu-22.04 - - ubuntu-20.04 - - macos-14 - - macos-13 - runs-on: ${{ matrix.os }} - steps: - - name: checkout - uses: actions/checkout@v4 - - name: Install (macOS/Linux) - run: | - echo "$HOME/.fly/bin" >> $GITHUB_PATH - sh ./installers/install.sh - - name: Check that `flyctl` works - run: flyctl version - - name: Check that `fly` works - run: fly version - # TODO[md]: this is currently broken on the latest stable releases. restore once fixed. - # - name: Check that the install config was saved - # run: | - # grep -x -q "channel: stable" ~/.fly/state.yml - - test-windows: - strategy: - fail-fast: false - max-parallel: 4 - matrix: - os: - - windows-latest - - windows-2019 - runs-on: ${{ matrix.os }} - env: - FLYCTL_INSTALL: "C:\\flyctl" - steps: - - name: checkout - uses: actions/checkout@v4 - - name: Install - shell: powershell - run: | - ./installers/install.ps1 - - name: Set Path - run: | - echo "$env:FLYCTL_INSTALL\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - run: env - shell: powershell - - run: ls $env:FLYCTL_INSTALL\bin - shell: powershell - - name: Check that `flyctl` works - shell: powershell - run: flyctl version - - name: Check that `fly` works - shell: powershell - run: fly version diff --git a/.gitignore b/.gitignore index 9896192d99..8c14109e7d 100644 --- a/.gitignore +++ b/.gitignore @@ -42,4 +42,5 @@ out # generated release meta release.json -.fly \ No newline at end of file +.fly +CLAUDE.md diff --git a/.golangci.yml b/.golangci.yml index 27746f007e..0d6b56ca24 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,129 +1,70 @@ -issues: - # List of regexps of issue texts to exclude, empty list by default. - # But independently from this option we use default exclude patterns, - # it can be disabled by `exclude-use-default: false`. To list all - # excluded by default patterns execute `golangci-lint run --help` - - exclude-rules: - # Exclude gosimple bool check - - linters: - - gosimple - text: "S(1002|1008|1021)" - # Exclude failing staticchecks for now - - linters: - - staticcheck - text: "SA(1006|1019|4006|4010|4017|5007|6005|9004):" - # Exclude lll issues for long lines with go:generate - - linters: - - lll - source: "^//go:generate " - - # Maximum issues count per one linter. Set to 0 to disable. Default is 50. - max-issues-per-linter: 0 - - # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. 
- max-same-issues: 0 - +version: "2" +run: + concurrency: 4 + go: "1.24" + issues-exit-code: 1 + tests: true linters: - disable-all: true + default: none enable: - # - gofumpt - # - goimports - - gofmt - - gosimple - govet - ineffassign - staticcheck - unconvert - unused - fast: true - -# options for analysis running -run: - go: "1.21" - - # default concurrency is a available CPU number - concurrency: 4 - - # timeout for analysis, e.g. 30s, 5m, default is 1m - timeout: 10m - - # exit code when at least one issue was found, default is 1 - issues-exit-code: 1 - - # include test files or not, default is true - tests: true - - # list of build tags, all linters use it. Default is empty list. - #build-tags: - # - mytag - - # default is true. Enables skipping of directories: - # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ - skip-dirs-use-default: true - - # which files to skip: they will be analyzed, but issues from them - # won't be reported. Default value is empty list, but there is - # no need to include all autogenerated files, we confidently recognize - # autogenerated files. If it's not please let us know. - skip-files: - - ".*\\.hcl2spec\\.go$" - - "docstrings/gen.go" - # - lib/bad.go - - # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules": - # If invoked with -mod=readonly, the go command is disallowed from the implicit - # automatic updating of go.mod described above. Instead, it fails when any changes - # to go.mod are needed. This setting is most useful to check that go.mod does - # not need updates, such as in a continuous integration and testing system. - # If invoked with -mod=vendor, the go command assumes that the vendor - # directory holds the correct copies of dependencies and ignores - # the dependency descriptions in go.mod. - # modules-download-mode: vendor - -# output configuration options -output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - format: colored-line-number - - # print lines of code with issue, default is true - print-issued-lines: true - - # print linter name in the end of issue text, default is true - print-linter-name: true - - # make issues output unique by line, default is true - uniq-by-line: true - -# all available settings of specific linters -linters-settings: - gofumpt: - module-path: github.com/superfly/flyctl - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. - check-blank: false - - # [deprecated] comma-separated list of pairs of the form pkg:regex - # the regex is used to ignore names within pkg. (default "fmt:.*"). 
- # see https://github.com/kisielk/errcheck#the-deprecated-method for details - ignore: fmt:.*,io:Close - - # path to a file containing a list of functions to exclude from checking - # see https://github.com/kisielk/errcheck#excluding-functions for details - #exclude: /path/to/file.txt - govet: - settings: - printf: - funcs: - - github.com/superfly/flyctl/terminal.Debugf - - github.com/superfly/flyctl/terminal.Infof - - github.com/superfly/flyctl/terminal.Warnf - - github.com/superfly/flyctl/terminal.Errorf - - github.com/superfly/flyctl/render.Printf - - github.com/superfly/flyctl/render.Detailf - - github.com/superfly/flyctl/render.Donef + - misspell + settings: + errcheck: + exclude-functions: + - fmt.* + - io.Close + govet: + settings: + printf: + funcs: + - github.com/superfly/flyctl/terminal.Debugf + - github.com/superfly/flyctl/terminal.Infof + - github.com/superfly/flyctl/terminal.Warnf + - github.com/superfly/flyctl/terminal.Errorf + - github.com/superfly/flyctl/render.Printf + - github.com/superfly/flyctl/render.Detailf + - github.com/superfly/flyctl/render.Donef + staticcheck: + checks: + - all + - -SA1019 # ... is deprecated + - -ST1003 # struct field ... should be ... (mostly acronyms such as Http -> HTTP) + - -ST1005 # error strings should not be capitalized + - -ST1008 # error should be returned as the last argument + - -ST1012 # error var ... should have name of the form Err... + - -QF1001 # could apply De Morgan's law + - -QF1002 # could use tagged switch + - -QF1003 # could use tagged switch on app + - -QF1008 # could remove embedded field ... from selector + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - lll + source: "^//go:generate " + paths: + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/.goreleaser.2.yml b/.goreleaser.2.yml index 0d561341c5..e6fcfc221a 100644 --- a/.goreleaser.2.yml +++ b/.goreleaser.2.yml @@ -103,6 +103,15 @@ dockers: - "ghcr.io/superfly/flyctl:latest" - "ghcr.io/superfly/flyctl:v{{ .Version }}" skip_push: auto + - goos: linux + goarch: amd64 + dockerfile: Dockerfile.mcp + image_templates: + - "flyio/mcp:latest" + - "flyio/mcp:v{{ .Version }}" + - "ghcr.io/superfly/mcp:latest" + - "ghcr.io/superfly/mcp:v{{ .Version }}" + skip_push: auto release: disable: false diff --git a/.goreleaser.yml b/.goreleaser.yml index 5dd158e9b3..8ee35e59f0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -81,6 +81,15 @@ dockers: - "ghcr.io/superfly/flyctl:latest" - "ghcr.io/superfly/flyctl:v{{ .Version }}" skip_push: auto + - goos: linux + goarch: amd64 + dockerfile: Dockerfile.mcp + image_templates: + - "flyio/mcp:latest" + - "flyio/mcp:v{{ .Version }}" + - "ghcr.io/superfly/mcp:latest" + - "ghcr.io/superfly/mcp:v{{ .Version }}" + skip_push: auto checksum: name_template: "checksums.txt" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c66aa20851..15449e1de6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,24 +1,24 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - 
id: check-added-large-files + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files -- repo: https://github.com/tekwizely/pre-commit-golang - rev: v1.0.0-rc.1 + - repo: https://github.com/tekwizely/pre-commit-golang + rev: v1.0.0-rc.2 hooks: - - id: go-mod-tidy + - id: go-mod-tidy -# NOTE: This pre-commit hook is ignored when running on Github Workflow -# because goalngci-lint github action is much more useful than the pre-commit action. -# The trick is to run github action only for "manual" hook stage -- repo: https://github.com/golangci/golangci-lint - rev: v1.54.2 + # NOTE: This pre-commit hook is ignored when running on Github Workflow + # because goalngci-lint github action is much more useful than the pre-commit action. + # The trick is to run github action only for "manual" hook stage + - repo: https://github.com/golangci/golangci-lint + rev: v2.5.0 hooks: - - id: golangci-lint - stages: [commit] + - id: golangci-lint + stages: [pre-commit] diff --git a/Dockerfile b/Dockerfile index c6e184c465..b434ea3b28 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:alpine as build +FROM golang:1.24.5-alpine as build RUN apk --no-cache add ca-certificates RUN mkdir /newtmp && chown 1777 /newtmp diff --git a/Dockerfile.dev b/Dockerfile.dev index 9aa516dc2c..e60f61c0f8 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -1,4 +1,4 @@ -FROM golang:alpine as build +FROM golang:1.24.5-alpine as build RUN apk --no-cache add ca-certificates WORKDIR /build diff --git a/Dockerfile.mcp b/Dockerfile.mcp new file mode 100644 index 0000000000..2e25de6416 --- /dev/null +++ b/Dockerfile.mcp @@ -0,0 +1,5 @@ +FROM node:slim +COPY flyctl /usr/bin +COPY --from=ghcr.io/astral-sh/uv:debian /usr/local/bin/uv* /usr/local/bin +EXPOSE 8080 +ENTRYPOINT [ "/usr/bin/flyctl", "mcp", "wrap", "--" ] diff --git a/README.md b/README.md index 0485e3625b..a10524f213 100644 --- a/README.md +++ b/README.md @@ -96,11 +96,5 @@ app: banana There is a simple Powershell script, `winbuild.ps1`, which will run the code generation for the help files, format them, and run a full build, leaving a new binary in the bin directory. -## Running from branches on your local machine - -Run `scripts/build-dfly` to build a Docker image from the current branch. Then, use `scripts/dfly` to run it. This assumes you are already -authenticated to Fly in your local environment. - - ## Contributing guide See [CONTRIBUTING.md](./CONTRIBUTING.md) diff --git a/agent/client.go b/agent/client.go index 0477d93c0d..5ca64a455a 100644 --- a/agent/client.go +++ b/agent/client.go @@ -16,6 +16,7 @@ import ( "time" "github.com/azazeal/pause" + "github.com/fsnotify/fsnotify" "golang.org/x/sync/errgroup" "github.com/superfly/flyctl/agent/internal/proto" @@ -51,11 +52,14 @@ func Establish(ctx context.Context, apiClient wireguard.WebClient) (*Client, err return nil, err } - if buildinfo.Version().Equal(resVer) { + // Seems like the flyctl agent version is set when the agent is started, + // so, in development, it can't ever be equal to the flyctl version, + // which seems to be set at build time. 
+ if buildinfo.Version().Equal(resVer) || (buildinfo.Version().Channel == "dev" && buildinfo.Version().Newer(resVer)) { return c, nil } - // TOOD: log this instead + // TODO: log this instead msg := fmt.Sprintf("The running flyctl agent (v%s) is older than the current flyctl (v%s).", res.Version, buildinfo.Version()) logger := logger.MaybeFromContext(ctx) @@ -88,12 +92,41 @@ func Establish(ctx context.Context, apiClient wireguard.WebClient) (*Client, err return nil, err } - // this is gross, but we need to wait for the agent to exit - pause.For(ctx, time.Second) + // wait for the agent to exit + waitUntilDeleted(ctx, PathToSocket(), time.Second) return StartDaemon(ctx) } +// Use fsnotify to wait until a file is deleted, fallback to a timeout on any error. +func waitUntilDeleted(ctx context.Context, path string, timeout time.Duration) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + time.Sleep(timeout) + return + } + defer watcher.Close() + + if err = watcher.Add(path); err != nil { + return + } + t := time.NewTimer(timeout) + defer t.Stop() + + for { + select { + case <-t.C: + return + case <-ctx.Done(): + return + case event := <-watcher.Events: + if event.Has(fsnotify.Remove) { + return + } + } + } +} + func newClient(network, addr string) *Client { return &Client{ network: network, diff --git a/agent/remote.go b/agent/remote.go new file mode 100644 index 0000000000..dd1040f907 --- /dev/null +++ b/agent/remote.go @@ -0,0 +1,42 @@ +package agent + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/iostreams" +) + +func BringUpAgent(ctx context.Context, client flyutil.Client, app *fly.AppCompact, network string, quiet bool) (*Client, Dialer, error) { + io := iostreams.FromContext(ctx) + + agentclient, err := Establish(ctx, client) + slug := app.Organization.Slug + name := app.Name + if err != nil { + captureError(ctx, err, "agent-remote", slug, name) + return nil, nil, errors.Wrap(err, "can't establish agent") + } + + dialer, err := agentclient.Dialer(ctx, slug, network) + if err != nil { + captureError(ctx, err, "agent-remote", slug, name) + return nil, nil, fmt.Errorf("ssh: can't build tunnel for %s: %s\n", slug, err) + } + + if !quiet { + io.StartProgressIndicatorMsg("Connecting to tunnel") + } + if err := agentclient.WaitForTunnel(ctx, slug, network); err != nil { + captureError(ctx, err, "agent-remote", slug, name) + return nil, nil, errors.Wrapf(err, "tunnel unavailable") + } + if !quiet { + io.StopProgressIndicator() + } + + return agentclient, dialer, nil +} diff --git a/deploy.rb b/deploy.rb index 5baa679103..b89947f216 100755 --- a/deploy.rb +++ b/deploy.rb @@ -91,6 +91,7 @@ in_step Step::GIT_PULL do ref = get_env("GIT_REF") artifact Artifact::GIT_INFO, { repository: GIT_REPO, reference: ref } + exec_capture("git init", log: false) redacted_repo_url = GIT_REPO_URL.dup diff --git a/deployer.Dockerfile b/deployer.Dockerfile index 4942e0df97..dafc4ddcfb 100644 --- a/deployer.Dockerfile +++ b/deployer.Dockerfile @@ -13,7 +13,7 @@ RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -ENV LANG en_US.UTF-8 +ENV LANG en_US.UTF-8 # configure git a bit RUN git config --global advice.detachedHead false && \ @@ -24,7 +24,7 @@ ENV DEFAULT_RUBY_VERSION=3.1.6 \ DEFAULT_ERLANG_VERSION=26.2.5.2 \ DEFAULT_ELIXIR_VERSION=1.16 \ DEFAULT_BUN_VERSION=1.1.24 \ - 
DEFAULT_PHP_VERSION=8.1.0 \ + DEFAULT_PHP_VERSION=8.1.0 \ DEFAULT_PYTHON_VERSION=3.12 ARG NODE_BUILD_VERSION=5.3.8 @@ -73,4 +73,4 @@ RUN mkdir -p /usr/src/app # need a login shell for rvm to work properly... ENTRYPOINT ["/bin/bash", "--login", "-c"] -CMD ["/deploy.rb"] \ No newline at end of file +CMD ["/deploy.rb"] diff --git a/deployer.Dockerfile.dockerignore b/deployer.Dockerfile.dockerignore index 03767f328c..d8cc33826e 100644 --- a/deployer.Dockerfile.dockerignore +++ b/deployer.Dockerfile.dockerignore @@ -2,4 +2,4 @@ !/bin/flyctl !deploy.rb -!deploy \ No newline at end of file +!deploy diff --git a/doc/main.go b/doc/main.go index 7b9ea65e92..e76399dac9 100644 --- a/doc/main.go +++ b/doc/main.go @@ -93,9 +93,9 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) // If it's runnable, show the useline otherwise show a version with [command] if cmd.Runnable() { - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.UseLine())) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.UseLine()) } else { - buf.WriteString(fmt.Sprintf("```\n%s [command] [flags]\n```", cmd.CommandPath()) + "\n\n") + fmt.Fprintf(buf, "```\n%s [command] [flags]\n```\n\n", cmd.CommandPath()) } } @@ -110,15 +110,15 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) } cname := name + " " + child.Name() link := cname + ".md" - link = strings.Replace(link, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", child.Name(), linkHandler(link), child.Short)) + link = strings.ReplaceAll(link, " ", "_") + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", child.Name(), linkHandler(link), child.Short) } buf.WriteString("\n") } if len(cmd.Example) > 0 { buf.WriteString(titlePrefix + "Examples\n\n") - buf.WriteString(fmt.Sprintf("```\n%s\n```\n\n", cmd.Example)) + fmt.Fprintf(buf, "```\n%s\n```\n\n", cmd.Example) } if err := printOptions(buf, cmd, name); err != nil { @@ -130,8 +130,8 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) parent := cmd.Parent() pname := parent.CommandPath() link := pname + ".md" - link = strings.Replace(link, " ", "_", -1) - buf.WriteString(fmt.Sprintf("* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short)) + link = strings.ReplaceAll(link, " ", "_") + fmt.Fprintf(buf, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) cmd.VisitParents(func(c *cobra.Command) { if c.DisableAutoGenTag { cmd.DisableAutoGenTag = c.DisableAutoGenTag @@ -172,7 +172,7 @@ func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHa } } - basename := strings.Replace(cmd.CommandPath(), " ", "_", -1) + ".md" + basename := strings.ReplaceAll(cmd.CommandPath(), " ", "_") + ".md" filename := filepath.Join(dir, basename) f, err := os.Create(filename) if err != nil { diff --git a/flyctl/config.go b/flyctl/config.go index bd804e82f3..05e6000181 100644 --- a/flyctl/config.go +++ b/flyctl/config.go @@ -17,6 +17,7 @@ const ( ConfigWireGuardState = "wire_guard_state" ConfigWireGuardWebsockets = "wire_guard_websockets" + ConfigAppSecretsMinvers = "app_secrets_minvers" ConfigRegistryHost = "registry_host" ) diff --git a/flyctl/flyctl.go b/flyctl/flyctl.go index 6a09ed786f..79434af974 100644 --- a/flyctl/flyctl.go +++ b/flyctl/flyctl.go @@ -10,7 +10,7 @@ import ( "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/instrument" "github.com/superfly/flyctl/terminal" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) var configDir string @@ -115,7 +115,7 @@ func loadConfig() error { return err } -var 
writeableConfigKeys = []string{ConfigAPIToken, ConfigInstaller, ConfigWireGuardState, ConfigWireGuardWebsockets, BuildKitNodeID} +var writeableConfigKeys = []string{ConfigAPIToken, ConfigInstaller, ConfigAppSecretsMinvers, ConfigWireGuardState, ConfigWireGuardWebsockets, BuildKitNodeID} func saveConfig() error { out := map[string]interface{}{} diff --git a/flypg/launcher.go b/flypg/launcher.go index b95de8c445..a6436d6d95 100644 --- a/flypg/launcher.go +++ b/flypg/launcher.go @@ -10,6 +10,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/buildinfo" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" @@ -128,11 +129,6 @@ func (l *Launcher) LaunchMachinesPostgres(ctx context.Context, config *CreateClu } } - secrets, err := l.setSecrets(ctx, config) - if err != nil { - return err - } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ AppCompact: app, AppName: app.Name, @@ -142,6 +138,11 @@ func (l *Launcher) LaunchMachinesPostgres(ctx context.Context, config *CreateClu } ctx = flapsutil.NewContextWithClient(ctx, flapsClient) + secrets, err := l.setSecrets(ctx, config) + if err != nil { + return err + } + nodes := make([]*fly.Machine, 0) for i := 0; i < config.InitialClusterSize; i++ { @@ -245,9 +246,14 @@ func (l *Launcher) LaunchMachinesPostgres(ctx context.Context, config *CreateClu Path: volumePath, }) + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return err + } launchInput := fly.LaunchMachineInput{ - Region: config.Region, - Config: machineConf, + Region: config.Region, + Config: machineConf, + MinSecretsVersion: minvers, } machine, err := flapsClient.Launch(ctx, launchInput) @@ -421,6 +427,10 @@ func (l *Launcher) createApp(ctx context.Context, config *CreateClusterInput) (* } func (l *Launcher) setSecrets(ctx context.Context, config *CreateClusterInput) (map[string]string, error) { + flapsClient := flapsutil.ClientFromContext(ctx) + if flapsClient == nil { + return nil, fmt.Errorf("missing flaps client in context") + } out := iostreams.FromContext(ctx).Out fmt.Fprintf(out, "Setting secrets on app %s...\n", config.AppName) @@ -494,8 +504,7 @@ func (l *Launcher) setSecrets(ctx context.Context, config *CreateClusterInput) ( secrets["OPERATOR_PASSWORD"] = config.Password } - _, err = l.client.SetSecrets(ctx, config.AppName, secrets) - + err = appsecrets.Update(ctx, flapsClient, config.AppName, secrets, nil) return secrets, err } diff --git a/go.mod b/go.mod index ef87c45a39..ce6998deb6 100644 --- a/go.mod +++ b/go.mod @@ -1,42 +1,41 @@ module github.com/superfly/flyctl -go 1.23 - -toolchain go1.23.3 +go 1.24.5 require ( github.com/AlecAivazis/survey/v2 v2.3.7 - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 - github.com/Khan/genqlient v0.7.1-0.20240819060157-4466fc10e4f3 + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c + github.com/Khan/genqlient v0.8.1 github.com/MakeNowJust/heredoc/v2 v2.0.1 github.com/Microsoft/go-winio v0.6.2 github.com/PuerkitoBio/rehttp v1.4.0 github.com/alecthomas/chroma v0.10.0 - github.com/avast/retry-go/v4 v4.6.0 - github.com/aws/aws-sdk-go-v2/config v1.28.6 - github.com/aws/aws-sdk-go-v2/credentials v1.17.47 - github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 + github.com/avast/retry-go/v4 v4.6.1 + github.com/aws/aws-sdk-go-v2/config v1.31.6 + github.com/aws/aws-sdk-go-v2/credentials v1.18.10 + 
github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3 github.com/azazeal/pause v1.3.0 github.com/blang/semver v3.5.1+incompatible - github.com/briandowns/spinner v1.23.1 - github.com/buildpacks/pack v0.36.0 + github.com/briandowns/spinner v1.23.2 + github.com/buildpacks/pack v0.36.4 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/cenkalti/backoff v2.2.1+incompatible + github.com/cenkalti/backoff/v5 v5.0.3 github.com/chzyer/readline v1.5.1 github.com/cli/safeexec v1.0.1 - github.com/coder/websocket v1.8.12 + github.com/coder/websocket v1.8.13 github.com/containerd/continuity v0.4.5 - github.com/depot/depot-go v0.5.0 - github.com/docker/docker v27.4.0-rc.4+incompatible + github.com/depot/depot-go v0.5.1 + github.com/docker/docker v27.5.1+incompatible github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/ejcx/sshcert v1.1.0 - github.com/getsentry/sentry-go v0.30.0 + github.com/fsnotify/fsnotify v1.9.0 + github.com/getsentry/sentry-go v0.32.0 github.com/go-kit/log v0.2.1 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/gofrs/flock v0.12.1 - github.com/google/go-cmp v0.6.0 + github.com/google/go-cmp v0.7.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/haileys/go-harlog v0.0.0-20230517070437-0f99204b5a57 github.com/hashicorp/go-multierror v1.1.1 @@ -46,67 +45,76 @@ require ( github.com/jinzhu/copier v0.4.0 github.com/jpillora/backoff v1.0.0 github.com/kr/text v0.2.0 - github.com/launchdarkly/go-sdk-common/v3 v3.2.0 + github.com/launchdarkly/go-sdk-common/v3 v3.4.0 github.com/logrusorgru/aurora v2.0.3+incompatible - github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-colorable v0.1.14 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-zglob v0.0.6 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d - github.com/miekg/dns v1.1.62 - github.com/moby/buildkit v0.18.1 + github.com/miekg/dns v1.1.64 + github.com/moby/buildkit v0.20.2 github.com/moby/patternmatcher v0.6.0 github.com/morikuni/aec v1.0.0 - github.com/muesli/termenv v0.15.2 - github.com/nats-io/nats.go v1.37.1-0.20241121095519-e963b776f24f + github.com/muesli/termenv v0.16.0 + github.com/nats-io/nats.go v1.43.0 github.com/novln/docker-parser v1.0.0 github.com/oklog/ulid/v2 v2.1.0 github.com/olekukonko/tablewriter v0.0.5 github.com/opencontainers/image-spec v1.1.0 - github.com/pelletier/go-toml/v2 v2.2.3 + github.com/pelletier/go-toml/v2 v2.2.4 github.com/pkg/errors v0.9.1 - github.com/pkg/sftp v1.13.7 + github.com/pkg/sftp v1.13.9 github.com/prometheus/blackbox_exporter v0.25.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/client_model v0.6.1 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.2 github.com/r3labs/diff v1.1.0 - github.com/samber/lo v1.47.0 + github.com/samber/lo v1.49.1 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 github.com/sourcegraph/conc v0.3.0 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.10.0 - github.com/superfly/fly-go v0.1.36 - github.com/superfly/graphql v0.2.4 + github.com/spf13/cobra v1.9.1 + github.com/spf13/pflag v1.0.9 + github.com/spf13/viper v1.20.1 + github.com/stretchr/testify v1.11.1 + github.com/superfly/fly-go v0.1.57 + github.com/superfly/graphql v0.2.6 github.com/superfly/lfsc-go v0.1.1 - github.com/superfly/macaroon v0.2.14-0.20240819201738-61a02aa53648 + github.com/superfly/macaroon v0.3.0 
github.com/superfly/tokenizer v0.0.3-0.20240826174224-a17a2e0a9dc0 - github.com/vektah/gqlparser/v2 v2.5.20 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 - go.opentelemetry.io/otel/sdk v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 - golang.org/x/crypto v0.30.0 - golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c - golang.org/x/mod v0.22.0 - golang.org/x/net v0.32.0 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 - golang.org/x/term v0.27.0 - golang.org/x/text v0.21.0 - golang.org/x/time v0.8.0 + github.com/vektah/gqlparser/v2 v2.5.30 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.uber.org/mock v0.6.0 + golang.org/x/crypto v0.42.0 + golang.org/x/mod v0.28.0 + golang.org/x/net v0.44.0 + golang.org/x/sync v0.17.0 + golang.org/x/sys v0.36.0 + golang.org/x/term v0.35.0 + golang.org/x/text v0.29.0 + golang.org/x/time v0.13.0 golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 - google.golang.org/grpc v1.68.1 - gopkg.in/yaml.v2 v2.4.0 + google.golang.org/grpc v1.75.0 gopkg.in/yaml.v3 v3.0.1 ) +require ( + github.com/bahlo/generic-list-go v0.2.0 // indirect + github.com/buger/jsonparser v1.1.1 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect + github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) + require ( connectrpc.com/connect v1.16.1 // indirect - dario.cat/mergo v1.0.0 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + dario.cat/mergo v1.0.1 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect @@ -119,31 +127,31 @@ require ( github.com/BurntSushi/toml v1.4.0 // indirect github.com/GoogleContainerTools/kaniko v1.23.2 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/agnivade/levenshtein v1.2.0 // indirect + github.com/agnivade/levenshtein v1.2.1 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/alexflint/go-arg v1.5.1 // indirect github.com/alexflint/go-scalar v1.2.0 // indirect github.com/andybalholm/brotli v1.1.0 // indirect - github.com/apex/log v1.9.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.32.6 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect + 
github.com/apex/log v1.9.0 + github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect - github.com/aws/smithy-go v1.22.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect + github.com/aws/smithy-go v1.23.0 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240419161514-af205d85bb44 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -153,51 +161,52 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/console v1.0.4 // indirect - github.com/containerd/containerd v1.7.24 // indirect - github.com/containerd/containerd/api v1.7.19 // indirect - github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/containerd v1.7.27 // indirect + github.com/containerd/containerd/api v1.8.0 + github.com/containerd/containerd/v2 v2.0.5 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/platforms v0.2.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect - github.com/containerd/ttrpc v1.2.5 // indirect + github.com/containerd/platforms v1.0.0-rc.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/containerd/ttrpc v1.2.7 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/dlclark/regexp2 v1.4.0 // indirect - 
github.com/docker/cli v27.4.0-rc.2+incompatible // indirect + github.com/docker/cli v27.5.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a // indirect + github.com/elazarl/goproxy v1.2.3 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/gdamore/encoding v1.0.0 // indirect - github.com/gdamore/tcell/v2 v2.7.4 // indirect + github.com/gdamore/encoding v1.0.1 // indirect + github.com/gdamore/tcell/v2 v2.8.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.12.0 // indirect + github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-git/v5 v5.13.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.1 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/go-containerregistry v0.20.2 // indirect + github.com/google/go-containerregistry v0.20.2 github.com/google/go-querystring v1.1.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect github.com/heroku/color v0.0.6 // indirect github.com/in-toto/in-toto-golang v0.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -206,75 +215,72 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kr/fs v0.1.0 // indirect github.com/launchdarkly/go-jsonstream/v3 v3.0.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mark3labs/mcp-go v0.39.1 + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/signal v0.7.1 // indirect github.com/moby/sys/user v0.3.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect + github.com/moby/term 
v0.5.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/nats-io/nkeys v0.4.8 // indirect + github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/selinux v1.11.0 // indirect + github.com/opencontainers/selinux v1.11.1 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 // indirect - github.com/rivo/uniseg v0.4.3 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/skeema/knownhosts v1.2.2 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect + github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/superfly/ltx v0.3.12 // indirect - github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184 // indirect + github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a // indirect github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect - github.com/vbatts/tar-split v0.11.5 // indirect + github.com/vbatts/tar-split v0.11.6 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.1 // indirect 
go.uber.org/multierr v1.11.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/oauth2 v0.24.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/tools v0.36.0 // indirect golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect - google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/protobuf v1.35.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gvisor.dev/gvisor v0.0.0-20230927004350-cbd86285d259 // indirect ) diff --git a/go.sum b/go.sum index 46de3b7677..f61f1f4c3c 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,17 @@ connectrpc.com/connect v1.16.1 h1:rOdrK/RTI/7TVnn3JsVxt3n028MlTRwmK5Q4heSpjis= connectrpc.com/connect v1.16.1/go.mod h1:XpZAduBQUySsb4/KO5JffORVkDI4B6/EYPi7N8xpNZw= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible 
h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= @@ -39,8 +39,8 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/GoogleContainerTools/kaniko v1.23.2 h1:/gu6xNhod6IfrZp9Pwlo9gDlqwu7YUsTec6TZTgOw6Q= github.com/GoogleContainerTools/kaniko v1.23.2/go.mod h1:jmMu5xcyuxDmDT2waMc8MSnSFBVRHHupp+0T12Oddsc= -github.com/Khan/genqlient v0.7.1-0.20240819060157-4466fc10e4f3 h1:tLgg6xDhCddhmU3rT1bVOv0VeTU5i1rCXPHbWT8ugD0= -github.com/Khan/genqlient v0.7.1-0.20240819060157-4466fc10e4f3/go.mod h1:jNiMcTbO4wd9h1jIjEe5+k+au3kC4WasHBgmy/N/lto= +github.com/Khan/genqlient v0.8.1 h1:wtOCc8N9rNynRLXN3k3CnfzheCUNKBcvXmVv5zt6WCs= +github.com/Khan/genqlient v0.8.1/go.mod h1:R2G6DzjBvCbhjsEajfRjbWdVglSH/73kSivC9TLWVjU= github.com/MakeNowJust/heredoc/v2 v2.0.1 h1:rlCHh70XXXv7toz95ajQWOWQnN4WNLt0TdpZYIR/J6A= github.com/MakeNowJust/heredoc/v2 v2.0.1/go.mod h1:6/2Abh5s+hc3g9nbWLe9ObDIOhaRrqsyY9MWy+4JdRM= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= @@ -48,18 +48,18 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.8 h1:BtDWYlFMcWhorrvSSo2M7z0csPdw6t7no/C3FsSvqiI= -github.com/Microsoft/hcsshim v0.12.8/go.mod h1:cibQ4BqhJ32FXDwPdQhKhwrwophnh3FuT4nwQZF907w= +github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= +github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= -github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= +github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/PuerkitoBio/rehttp v1.4.0 h1:rIN7A2s+O9fmHUM1vUcInvlHj9Ysql4hE+Y0wcl/xk8= github.com/PuerkitoBio/rehttp v1.4.0/go.mod h1:LUwKPoDbDIA2RL5wYZCNsQ90cx4OJ4AWBmq6KzWZL1s= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= -github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= 
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek= @@ -91,49 +91,49 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/avast/retry-go/v4 v4.6.0 h1:K9xNA+KeB8HHc2aWFuLb25Offp+0iVRXEvFx8IinRJA= -github.com/avast/retry-go/v4 v4.6.0/go.mod h1:gvWlPhBVsvBbLkVGDg/KwvBv0bEkCOLRRSHKIr2PyOE= +github.com/avast/retry-go/v4 v4.6.1 h1:VkOLRubHdisGrHnTu89g08aQEWEgRU7LVEop3GbIcMk= +github.com/avast/retry-go/v4 v4.6.1/go.mod h1:V6oF8njAwxJ5gRo1Q7Cxab24xs5NCWZBeaHHBklR8mA= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= -github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= -github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= -github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 h1:r67ps7oHCYnflpgDy2LZU0MAQtQbYIOqNNnqGO6xQkE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25/go.mod h1:GrGY+Q4fIokYLtjCVB/aFfCVL6hhGUFl8inD18fDalE= +github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk= +github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo= +github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ= +github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg= 
+github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 h1:R0tNFJqfjHL3900cqhXuwQ+1K4G0xc9Yf8EDbFXCKEw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6/go.mod h1:y/7sDdu+aJvPtGXr4xYosdpq9a6T9Z0jkXfugmti0rI= github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4 h1:Qr9W21mzWT3RhfYn9iAux7CeRIdbnTAqmiOlASqQgZI= github.com/aws/aws-sdk-go-v2/service/ecr v1.27.4/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4 h1:aNuiieMaS2IHxqAsTdM/pjHyY1aoaDLBGLqpNnFMMqk= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.4/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 h1:HCpPsWqmYQieU7SS6E9HXfdAMSud0pteVXieJmcpIRI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6/go.mod h1:ngUiVRCco++u+soRRVBIvBZxSMMvOVMXA4PJ36JLfSw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlmjBiCr/le3wzhA37O8QTC5/Ab8+EXk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 h1:nyuzXooUNJexRT0Oy0UQY6AhOzxPxhtt4DcBIHyCnmw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= -github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= -github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 h1:hncKj/4gR+TPauZgTAsxOxNcvBayhUlYZ6LO/BYiQ30= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6/go.mod h1:OiIh45tp6HdJDDJGnja0mw8ihQGz3VGrUflLqSL0SmM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 h1:nEXUSAwyUfLTgnc9cxlDWy637qsq4UWwp3sNAfl0Z3Y= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6/go.mod h1:HGzIULx4Ge3Do2V0FaiYKcyKzOqwrhUZgCI77NisswQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3 h1:ETkfWcXP2KNPLecaDa++5bsQhCRa5M5sLUJa5DWYIIg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3/go.mod h1:+/3ZTqoYb3Ur7DObD00tarKMLMuKg8iqz5CHEanqTnw= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240419161514-af205d85bb44 h1:oNDkocd5/+6jUuxyz07jQWnKhgpNtKQoZSXKMb7emqQ= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240419161514-af205d85bb44/go.mod h1:2nlYPkG0rFrODp6R875pk/kOnB8Ivj3+onhzk2mO57g= github.com/aybabtme/iocontrol v0.0.0-20150809002002-ad15bcfc95a0 h1:0NmehRCgyk5rljDQLKUO+cRJCnduDyn11+zGZIc9Z48= @@ -143,6 +143,8 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/azazeal/pause v1.3.0 h1:q901DXvCWqbG19lEUP6fwXbD6KGu96KEGmWmXmurOD8= github.com/azazeal/pause v1.3.0/go.mod h1:zjDZP5dxZndm0bO5zZDhlzqh4zpgSo6M59HNq6UetqI= +github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -155,21 +157,22 @@ github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0 github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bradleyjkemp/cupaloy/v2 v2.6.0 h1:knToPYa2xtfg42U3I6punFEjaGFKWQRXJwj0JTv4mTs= github.com/bradleyjkemp/cupaloy/v2 v2.6.0/go.mod 
h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= -github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= -github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= +github.com/briandowns/spinner v1.23.2 h1:Zc6ecUnI+YzLmJniCfDNaMbW0Wid1d5+qcTq4L2FW8w= +github.com/briandowns/spinner v1.23.2/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168 h1:yVYVi1V7x1bXklOx9lpbTfteyzQKGZC/wkl+IlaVRlU= github.com/buildpacks/imgutil v0.0.0-20240605145725-186f89b2d168/go.mod h1:n2R6VRuWsAX3cyHCp/u0Z4WJcixny0gYg075J39owrk= github.com/buildpacks/lifecycle v0.20.4 h1:VVVTrd9y1LHY3adchh6oktw0wKQuYsWLq3/g23TLaGQ= github.com/buildpacks/lifecycle v0.20.4/go.mod h1:ZsExeEhN+6Qws7iDHJl6PV6zsHycgK/RmDKnRgKQTH0= -github.com/buildpacks/pack v0.36.0 h1:zIGdIMIkSYCJY7G4xz1DaIeE5iKyjZaA4kzSjoFTqFw= -github.com/buildpacks/pack v0.36.0/go.mod h1:Hezzmz5K6JWcWOtsZAFWdptXF5eax5EcMkENXkWZIJA= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/buildpacks/pack v0.36.4 h1:otHfS3Ju9TBRTz6LAScXSZw+BEw7BLTB0QPUtWPuiHg= +github.com/buildpacks/pack v0.36.4/go.mod h1:DUFJ5IFnHOtFf+K/wStELnn84kPwSTeMxteXyUJwlRg= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= @@ -182,53 +185,58 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cli/safeexec v1.0.1 h1:e/C79PbXF4yYTN/wauC4tviMxEV13BwljGj0N9j+N00= github.com/cli/safeexec v1.0.1/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5zx3Q= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= +github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/coder/websocket 
v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= -github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= +github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= -github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA= -github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= -github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA= -github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig= +github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII= +github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0= +github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= +github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= +github.com/containerd/containerd/v2 v2.0.5 h1:2vg/TjUXnaohAxiHnthQg8K06L9I4gdYEMcOLiMc8BQ= +github.com/containerd/containerd/v2 v2.0.5/go.mod h1:Qqo0UN43i2fX1FLkrSTCg6zcHNfjN7gEnx3NPRZI+N0= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= -github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= -github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco= -github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk= -github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= -github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod 
h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= -github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8= +github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw= +github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= +github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= +github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= +github.com/containerd/plugin v1.0.0/go.mod h1:hQfJe5nmWfImiqT1q8Si3jLv3ynMUIBB47bQ+KexvO8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRqQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/depot/depot-go v0.5.0 h1:OBx/B0DGviHVG+TDmhdpaQA1anTGlQviwBrhR9TKTmQ= -github.com/depot/depot-go v0.5.0/go.mod h1:9xKcGBd3HlDFcFkRbbdOWF/+2bBG0aFtpZAI+5rvfDc= +github.com/depot/depot-go v0.5.1 h1:Kdrsk8q7W2fQvoudWNjxsXG4ZbdlUAa6EV18udDnTFQ= +github.com/depot/depot-go v0.5.1/go.mod h1:QQtSqwRn0flx4KxrUVSJGlh0hTFeZ19MLYvOcJbxtP0= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= @@ -237,18 +245,16 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/cli v27.4.0-rc.2+incompatible h1:A0GZwegDlt2wdt3tpmrUzkVOZmbhvd7i05wPSf7Oo74= -github.com/docker/cli v27.4.0-rc.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.0-rc.4+incompatible h1:j9bmeQ/FYeUHlLdMInCkipt2v2Yeypx02ggPmsnP5cU= -github.com/docker/docker v27.4.0-rc.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -260,10 +266,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ejcx/sshcert v1.1.0 h1:y5WW1RKscZWmArFQXMR5dqCWX05MHvJyETCEsSj1ZYs= github.com/ejcx/sshcert v1.1.0/go.mod h1:QrS48TF3vxtR6WZhxxOjYKSu54wKoVaux4UV9TuUDNU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= +github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -274,27 +278,28 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw= +github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo= github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1/go.mod h1:Az6Jt+M5idSED2YPGtwnfJV0kXohgdCBPmHGSYc1r04= -github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU= -github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg= -github.com/getsentry/sentry-go v0.30.0 h1:lWUwDnY7sKHaVIoZ9wYqRHJ5iEmoc0pqcRqFkosKzBo= -github.com/getsentry/sentry-go v0.30.0/go.mod h1:WU9B9/1/sHDqeV8T+3VwwbjeR5MSXs/6aqG3mqZrezA= -github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= -github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/gdamore/tcell/v2 v2.8.0 h1:IDclow1j6kKpU/gOhjmc+7Pj5Dxnukb74pfKN4Cxrfg= +github.com/gdamore/tcell/v2 v2.8.0/go.mod h1:bj8ori1BG3OYMjmb3IklZVWfZUJ1UBQt9JXrOCOhGWw= +github.com/getsentry/sentry-go v0.32.0 h1:YKs+//QmwE3DcYtfKRH8/KyOOF/I6Qnx7qYGNHCGmCY= +github.com/getsentry/sentry-go v0.32.0/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= +github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= 
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -303,13 +308,13 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= -github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -318,8 +323,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= -github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -333,8 +338,9 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= github.com/google/go-containerregistry v0.20.2/go.mod 
h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -349,21 +355,21 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/haileys/go-harlog v0.0.0-20230517070437-0f99204b5a57 h1:m7J0Y9Bqry85LVC3uboOp9sDUnH29o85FB5ZoSa3fTg= github.com/haileys/go-harlog v0.0.0-20230517070437-0f99204b5a57/go.mod h1:feJwxrNkN8pzC59AtacOExfyTP4Z5Z44hlgNYEG+KKM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/heroku/color v0.0.6 h1:UTFFMrmMLFcL3OweqP1lAdp8i1y/9oHqkeHjQ/b/Ny0= github.com/heroku/color v0.0.6/go.mod h1:ZBvOcx7cTF2QKOv4LbmoBtNl5uB17qWxGuzZrsi1wLU= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -377,6 +383,8 @@ github.com/inancgumus/screen v0.0.0-20190314163918-06e984b86ed3 h1:fO9A67/izFYFY github.com/inancgumus/screen v0.0.0-20190314163918-06e984b86ed3/go.mod h1:Ey4uAp+LvIl+s5jRbOHLcZpUDnkjLBROl15fZLwPlTM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/itchyny/json2yaml v0.1.4 h1:/pErVOXGG5iTyXHi/QKR4y3uzhLjGTEmmJIy97YT+k8= github.com/itchyny/json2yaml v0.1.4/go.mod h1:6iudhBZdarpjLFRNj+clWLAkGft+9uCcjAZYXUH9eGI= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -402,8 +410,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -420,49 +428,46 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/launchdarkly/go-jsonstream/v3 v3.0.0 h1:qJF/WI09EUJ7kSpmP5d1Rhc81NQdYUhP17McKfUq17E= github.com/launchdarkly/go-jsonstream/v3 v3.0.0/go.mod h1:/1Gyml6fnD309JOvunOSfyysWbZ/ZzcA120gF/cQtC4= -github.com/launchdarkly/go-sdk-common/v3 v3.2.0 h1:LzwlrXRBPC7NjdbnDxio8YGHMvDrNb4i6lbjpLgwsyk= -github.com/launchdarkly/go-sdk-common/v3 v3.2.0/go.mod h1:mXFmDGEh4ydK3QilRhrAyKuf9v44VZQWnINyhqbbOd0= +github.com/launchdarkly/go-sdk-common/v3 v3.4.0 h1:GTRulE0G43xdWY1QdjAXJ7QnZ8PMFU8pOWZICCydEtM= +github.com/launchdarkly/go-sdk-common/v3 v3.4.0/go.mod h1:6MNeeP8b2VtsM6I3TbShCHW/+tYh2c+p5dB+ilS69sg= github.com/launchdarkly/go-test-helpers/v3 v3.0.1 h1:Z4lUVrh7+hIvL47KVjEBE/owbqqjKUEYTp4aBX/5OZM= github.com/launchdarkly/go-test-helpers/v3 v3.0.1/go.mod h1:u2ZvJlc/DDJTFrshWW50tWMZHLVYXofuSHUfTU/eIwM= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mark3labs/mcp-go v0.39.1 h1:2oPxk7aDbQhouakkYyKl2T4hKFU1c6FDaubWyGyVE1k= +github.com/mark3labs/mcp-go v0.39.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= 
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A= github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ= +github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e h1:Qa6dnn8DlasdXRnacluu8HzPts0S1I9zvvUPDbBnXFI= github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e/go.mod h1:waEya8ee1Ro/lgxpVhkJI4BVASzkm3UZqkx/cFJiYHM= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.18.1 h1:Iwrz2F/Za2Gjkpwu3aM2LX92AFfJCJe2oNnvGNvh2Rc= -github.com/moby/buildkit v0.18.1/go.mod h1:vCR5CX8NGsPTthTg681+9kdmfvkvqJBXEv71GZe5msU= +github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= +github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -479,25 +484,25 @@ github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 
h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/nats.go v1.37.1-0.20241121095519-e963b776f24f h1:1d4vwE5L9eMQuYP/pvRQWc7usYhAuQxAEV5DJxE/9Kg= -github.com/nats-io/nats.go v1.37.1-0.20241121095519-e963b776f24f/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= -github.com/nats-io/nkeys v0.4.8 h1:+wee30071y3vCZAYRsnrmIPaOe47A/SkK/UBDPdIV70= -github.com/nats-io/nkeys v0.4.8/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug= +github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= +github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= +github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/novln/docker-parser v1.0.0 h1:PjEBd9QnKixcWczNGyEdfUrP6GR0YUilAqG7Wksg3uc= @@ -508,21 +513,21 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= -github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= +github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= -github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -533,8 +538,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= -github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= +github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= +github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -545,16 +550,16 @@ github.com/prometheus/blackbox_exporter v0.25.0/go.mod h1:SpTDn8xW1XOstBQ1uVgw54 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -565,21 +570,19 @@ github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6 github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 h1:xe+mmCnDN82KhC010l3NfYlA8ZbOuzbXAzSYBa6wbMc= github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8/go.mod h1:WIfMkQNY+oq/mWwtsjOYHIZBuwthioY2srOmljJkTnk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= -github.com/samber/lo v1.47.0/go.mod 
h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= +github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= +github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= +github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew= +github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o= github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= @@ -593,8 +596,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= @@ -604,16 +607,17 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= +github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 
h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -627,20 +631,20 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/superfly/fly-go v0.1.36 h1:0UeBJyAfjUB4a+Bbt+EyTJJnJBJ+fwit5H0Brh81/LE= -github.com/superfly/fly-go v0.1.36/go.mod h1:aPrmbN+GqUM3w8Y2pXMIqr9MPbF4ZiUnG82EAoXcvAY= -github.com/superfly/graphql v0.2.4 h1:Av8hSk4x8WvKJ6MTnEwrLknSVSGPc7DWpgT3z/kt3PU= -github.com/superfly/graphql v0.2.4/go.mod h1:CVfDl31srm8HnJ9udwLu6hFNUW/P6GUM2dKcG1YQ8jc= +github.com/superfly/fly-go v0.1.57 h1:d3p8i6YKqXKSJSxxNW7yWrRdCSBqXJkwduJ94ffB1Hs= +github.com/superfly/fly-go v0.1.57/go.mod h1:wpq4XNor10w9KurA15CBYRnhtT2mnemAXYHuqkhp2vI= +github.com/superfly/graphql v0.2.6 h1:zppbodNerWecoXEdjkhrqaNaSjGqobhXNlViHFuZzb4= +github.com/superfly/graphql v0.2.6/go.mod h1:CVfDl31srm8HnJ9udwLu6hFNUW/P6GUM2dKcG1YQ8jc= github.com/superfly/lfsc-go v0.1.1 h1:dGjLgt81D09cG+aR9lJZIdmonjZSR5zYCi7s54+ZU2Q= github.com/superfly/lfsc-go v0.1.1/go.mod h1:zVb0VENz/Il8Nmvvd4XAsX2bWhQ+sr0nK8vv9PeezcE= github.com/superfly/ltx v0.3.12 h1:Z7z1sc4g34/jUi3XO84+zBlIsbaoh2RJ3b4zTQpBK/M= github.com/superfly/ltx v0.3.12/go.mod h1:ly+Dq7UVacQVEI5/b0r6j+PSNy9ibwx1yikcWAaSkhE= -github.com/superfly/macaroon v0.2.14-0.20240819201738-61a02aa53648 h1:YQG1v1QcTFQxJureNBcbtxosZ98u78ceUNCDQgI/vgM= -github.com/superfly/macaroon v0.2.14-0.20240819201738-61a02aa53648/go.mod h1:Kt6/EdSYfFjR4GIe+erMwcJgU8iMu1noYVceQ5dNdKo= +github.com/superfly/macaroon v0.3.0 h1:tdRq5VqBCNJIlvYByZZ3bGDOKX/v0llQM/Ljd27DbU8= +github.com/superfly/macaroon v0.3.0/go.mod h1:ZAmlRD/Hmp/ddTxE8IonZ7NdTny2DcOffRvZhapQwJw= github.com/superfly/tokenizer v0.0.3-0.20240826174224-a17a2e0a9dc0 h1:0GZOxvuQ2u3XUY7Hr8N02zn4ZN9Iz2xgi3aNNaUpRO4= github.com/superfly/tokenizer v0.0.3-0.20240826174224-a17a2e0a9dc0/go.mod h1:w38ieJ28pCyIpQJzuDOKfN5z6Q6R92vOkAYtUv6FL9k= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= @@ -650,22 +654,24 @@ github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod 
h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= -github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184 h1:RgyoSI38Y36zjQaszel/0RAcIehAnjA1B0RiUV9SDO4= -github.com/tonistiigi/fsutil v0.0.0-20241121093142-31cf1f437184/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw= +github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY= +github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw= github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vektah/gqlparser/v2 v2.5.20 h1:kPaWbhBntxoZPaNdBaIPT1Kh0i1b/onb5kXgEdP5JCo= -github.com/vektah/gqlparser/v2 v2.5.20/go.mod h1:xMl+ta8a5M1Yo1A1Iwt/k7gSpscwSnHZdw7tfhEGfTM= +github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= +github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE= +github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= +github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -675,33 +681,43 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 h1:gbhw/u49SS3gkPWiYweQNJGm/uJN5GkI/FrosxSHT7A= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1/go.mod h1:GnOaBaFQ2we3b9AGWJpsBa7v1S5RlQzlC3O7dRMxZhM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -713,14 +729,15 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= -golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp 
v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= -golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -729,8 +746,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -743,14 +763,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= -golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= -golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= -golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.15.0/go.mod 
h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -759,8 +780,12 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -780,29 +805,31 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys 
v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -810,13 +837,15 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -826,8 +855,10 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -836,24 +867,22 @@ golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeu golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 h1:/jFs0duh4rdb8uIfPMv78iAJGcPKDeqAFnaLBropIC4= golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA= -google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= -google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= diff --git a/gql/generated.go b/gql/generated.go index 80eea5643d..960d696ecd 100644 --- a/gql/generated.go +++ b/gql/generated.go @@ -455,6 +455,8 @@ type AppData struct { Deployed bool `json:"deployed"` // Fly platform version PlatformVersion PlatformVersionEnum `json:"platformVersion"` + // Target to use for for CNAME DNS records + CnameTarget string `json:"cnameTarget"` // Secrets set on the application Secrets []AppDataSecretsSecret `json:"secrets"` // Organization that owns this app @@ -473,6 +475,9 @@ func (v *AppData) GetDeployed() bool { return v.Deployed } // GetPlatformVersion returns AppData.PlatformVersion, and is useful for accessing the field via an interface. func (v *AppData) GetPlatformVersion() PlatformVersionEnum { return v.PlatformVersion } +// GetCnameTarget returns AppData.CnameTarget, and is useful for accessing the field via an interface. +func (v *AppData) GetCnameTarget() string { return v.CnameTarget } + // GetSecrets returns AppData.Secrets, and is useful for accessing the field via an interface. func (v *AppData) GetSecrets() []AppDataSecretsSecret { return v.Secrets } @@ -504,6 +509,12 @@ func (v *AppDataOrganization) GetProvisionsBetaExtensions() bool { return v.OrganizationData.ProvisionsBetaExtensions } +// GetName returns AppDataOrganization.Name, and is useful for accessing the field via an interface. +func (v *AppDataOrganization) GetName() string { return v.OrganizationData.Name } + +// GetBillable returns AppDataOrganization.Billable, and is useful for accessing the field via an interface. 
+func (v *AppDataOrganization) GetBillable() bool { return v.OrganizationData.Billable } + func (v *AppDataOrganization) UnmarshalJSON(b []byte) error { if string(b) == "null" { @@ -541,6 +552,10 @@ type __premarshalAppDataOrganization struct { AddOnSsoLink string `json:"addOnSsoLink"` ProvisionsBetaExtensions bool `json:"provisionsBetaExtensions"` + + Name string `json:"name"` + + Billable bool `json:"billable"` } func (v *AppDataOrganization) MarshalJSON() ([]byte, error) { @@ -560,6 +575,8 @@ func (v *AppDataOrganization) __premarshalJSON() (*__premarshalAppDataOrganizati retval.PaidPlan = v.OrganizationData.PaidPlan retval.AddOnSsoLink = v.OrganizationData.AddOnSsoLink retval.ProvisionsBetaExtensions = v.OrganizationData.ProvisionsBetaExtensions + retval.Name = v.OrganizationData.Name + retval.Billable = v.OrganizationData.Billable return &retval, nil } @@ -726,6 +743,9 @@ func (v *CreateAppCreateAppCreateAppPayloadApp) GetPlatformVersion() PlatformVer return v.AppData.PlatformVersion } +// GetCnameTarget returns CreateAppCreateAppCreateAppPayloadApp.CnameTarget, and is useful for accessing the field via an interface. +func (v *CreateAppCreateAppCreateAppPayloadApp) GetCnameTarget() string { return v.AppData.CnameTarget } + // GetSecrets returns CreateAppCreateAppCreateAppPayloadApp.Secrets, and is useful for accessing the field via an interface. func (v *CreateAppCreateAppCreateAppPayloadApp) GetSecrets() []AppDataSecretsSecret { return v.AppData.Secrets @@ -774,6 +794,8 @@ type __premarshalCreateAppCreateAppCreateAppPayloadApp struct { PlatformVersion PlatformVersionEnum `json:"platformVersion"` + CnameTarget string `json:"cnameTarget"` + Secrets []AppDataSecretsSecret `json:"secrets"` Organization AppDataOrganization `json:"organization"` @@ -796,6 +818,7 @@ func (v *CreateAppCreateAppCreateAppPayloadApp) __premarshalJSON() (*__premarsha retval.Name = v.AppData.Name retval.Deployed = v.AppData.Deployed retval.PlatformVersion = v.AppData.PlatformVersion + retval.CnameTarget = v.AppData.CnameTarget retval.Secrets = v.AppData.Secrets retval.Organization = v.AppData.Organization return &retval, nil @@ -1576,6 +1599,9 @@ func (v *GetAddOnAddOnApp) GetDeployed() bool { return v.AppData.Deployed } // GetPlatformVersion returns GetAddOnAddOnApp.PlatformVersion, and is useful for accessing the field via an interface. func (v *GetAddOnAddOnApp) GetPlatformVersion() PlatformVersionEnum { return v.AppData.PlatformVersion } +// GetCnameTarget returns GetAddOnAddOnApp.CnameTarget, and is useful for accessing the field via an interface. +func (v *GetAddOnAddOnApp) GetCnameTarget() string { return v.AppData.CnameTarget } + // GetSecrets returns GetAddOnAddOnApp.Secrets, and is useful for accessing the field via an interface. 
func (v *GetAddOnAddOnApp) GetSecrets() []AppDataSecretsSecret { return v.AppData.Secrets } @@ -1616,6 +1642,8 @@ type __premarshalGetAddOnAddOnApp struct { PlatformVersion PlatformVersionEnum `json:"platformVersion"` + CnameTarget string `json:"cnameTarget"` + Secrets []AppDataSecretsSecret `json:"secrets"` Organization AppDataOrganization `json:"organization"` @@ -1636,6 +1664,7 @@ func (v *GetAddOnAddOnApp) __premarshalJSON() (*__premarshalGetAddOnAddOnApp, er retval.Name = v.AppData.Name retval.Deployed = v.AppData.Deployed retval.PlatformVersion = v.AppData.PlatformVersion + retval.CnameTarget = v.AppData.CnameTarget retval.Secrets = v.AppData.Secrets retval.Organization = v.AppData.Organization return &retval, nil @@ -1862,6 +1891,9 @@ func (v *GetAppApp) GetDeployed() bool { return v.AppData.Deployed } // GetPlatformVersion returns GetAppApp.PlatformVersion, and is useful for accessing the field via an interface. func (v *GetAppApp) GetPlatformVersion() PlatformVersionEnum { return v.AppData.PlatformVersion } +// GetCnameTarget returns GetAppApp.CnameTarget, and is useful for accessing the field via an interface. +func (v *GetAppApp) GetCnameTarget() string { return v.AppData.CnameTarget } + // GetSecrets returns GetAppApp.Secrets, and is useful for accessing the field via an interface. func (v *GetAppApp) GetSecrets() []AppDataSecretsSecret { return v.AppData.Secrets } @@ -1902,6 +1934,8 @@ type __premarshalGetAppApp struct { PlatformVersion PlatformVersionEnum `json:"platformVersion"` + CnameTarget string `json:"cnameTarget"` + Secrets []AppDataSecretsSecret `json:"secrets"` Organization AppDataOrganization `json:"organization"` @@ -1922,6 +1956,7 @@ func (v *GetAppApp) __premarshalJSON() (*__premarshalGetAppApp, error) { retval.Name = v.AppData.Name retval.Deployed = v.AppData.Deployed retval.PlatformVersion = v.AppData.PlatformVersion + retval.CnameTarget = v.AppData.CnameTarget retval.Secrets = v.AppData.Secrets retval.Organization = v.AppData.Organization return &retval, nil @@ -1959,6 +1994,9 @@ func (v *GetAppWithAddonsApp) GetPlatformVersion() PlatformVersionEnum { return v.AppData.PlatformVersion } +// GetCnameTarget returns GetAppWithAddonsApp.CnameTarget, and is useful for accessing the field via an interface. +func (v *GetAppWithAddonsApp) GetCnameTarget() string { return v.AppData.CnameTarget } + // GetSecrets returns GetAppWithAddonsApp.Secrets, and is useful for accessing the field via an interface. func (v *GetAppWithAddonsApp) GetSecrets() []AppDataSecretsSecret { return v.AppData.Secrets } @@ -2001,6 +2039,8 @@ type __premarshalGetAppWithAddonsApp struct { PlatformVersion PlatformVersionEnum `json:"platformVersion"` + CnameTarget string `json:"cnameTarget"` + Secrets []AppDataSecretsSecret `json:"secrets"` Organization AppDataOrganization `json:"organization"` @@ -2022,6 +2062,7 @@ func (v *GetAppWithAddonsApp) __premarshalJSON() (*__premarshalGetAppWithAddonsA retval.Name = v.AppData.Name retval.Deployed = v.AppData.Deployed retval.PlatformVersion = v.AppData.PlatformVersion + retval.CnameTarget = v.AppData.CnameTarget retval.Secrets = v.AppData.Secrets retval.Organization = v.AppData.Organization return &retval, nil @@ -2183,6 +2224,11 @@ func (v *GetAppsByRoleAppsAppConnectionNodesApp) GetPlatformVersion() PlatformVe return v.AppData.PlatformVersion } +// GetCnameTarget returns GetAppsByRoleAppsAppConnectionNodesApp.CnameTarget, and is useful for accessing the field via an interface. 
+func (v *GetAppsByRoleAppsAppConnectionNodesApp) GetCnameTarget() string { + return v.AppData.CnameTarget +} + // GetSecrets returns GetAppsByRoleAppsAppConnectionNodesApp.Secrets, and is useful for accessing the field via an interface. func (v *GetAppsByRoleAppsAppConnectionNodesApp) GetSecrets() []AppDataSecretsSecret { return v.AppData.Secrets @@ -2227,6 +2273,8 @@ type __premarshalGetAppsByRoleAppsAppConnectionNodesApp struct { PlatformVersion PlatformVersionEnum `json:"platformVersion"` + CnameTarget string `json:"cnameTarget"` + Secrets []AppDataSecretsSecret `json:"secrets"` Organization AppDataOrganization `json:"organization"` @@ -2247,6 +2295,7 @@ func (v *GetAppsByRoleAppsAppConnectionNodesApp) __premarshalJSON() (*__premarsh retval.Name = v.AppData.Name retval.Deployed = v.AppData.Deployed retval.PlatformVersion = v.AppData.PlatformVersion + retval.CnameTarget = v.AppData.CnameTarget retval.Secrets = v.AppData.Secrets retval.Organization = v.AppData.Organization return &retval, nil @@ -2336,6 +2385,12 @@ func (v *GetOrganizationOrganization) GetProvisionsBetaExtensions() bool { return v.OrganizationData.ProvisionsBetaExtensions } +// GetName returns GetOrganizationOrganization.Name, and is useful for accessing the field via an interface. +func (v *GetOrganizationOrganization) GetName() string { return v.OrganizationData.Name } + +// GetBillable returns GetOrganizationOrganization.Billable, and is useful for accessing the field via an interface. +func (v *GetOrganizationOrganization) GetBillable() bool { return v.OrganizationData.Billable } + func (v *GetOrganizationOrganization) UnmarshalJSON(b []byte) error { if string(b) == "null" { @@ -2373,6 +2428,10 @@ type __premarshalGetOrganizationOrganization struct { AddOnSsoLink string `json:"addOnSsoLink"` ProvisionsBetaExtensions bool `json:"provisionsBetaExtensions"` + + Name string `json:"name"` + + Billable bool `json:"billable"` } func (v *GetOrganizationOrganization) MarshalJSON() ([]byte, error) { @@ -2392,6 +2451,8 @@ func (v *GetOrganizationOrganization) __premarshalJSON() (*__premarshalGetOrgani retval.PaidPlan = v.OrganizationData.PaidPlan retval.AddOnSsoLink = v.OrganizationData.AddOnSsoLink retval.ProvisionsBetaExtensions = v.OrganizationData.ProvisionsBetaExtensions + retval.Name = v.OrganizationData.Name + retval.Billable = v.OrganizationData.Billable return &retval, nil } @@ -2598,6 +2659,9 @@ type OrganizationData struct { AddOnSsoLink string `json:"addOnSsoLink"` // Whether the organization can provision beta extensions ProvisionsBetaExtensions bool `json:"provisionsBetaExtensions"` + // Organization name + Name string `json:"name"` + Billable bool `json:"billable"` } // GetId returns OrganizationData.Id, and is useful for accessing the field via an interface. @@ -2618,6 +2682,12 @@ func (v *OrganizationData) GetAddOnSsoLink() string { return v.AddOnSsoLink } // GetProvisionsBetaExtensions returns OrganizationData.ProvisionsBetaExtensions, and is useful for accessing the field via an interface. func (v *OrganizationData) GetProvisionsBetaExtensions() bool { return v.ProvisionsBetaExtensions } +// GetName returns OrganizationData.Name, and is useful for accessing the field via an interface. +func (v *OrganizationData) GetName() string { return v.Name } + +// GetBillable returns OrganizationData.Billable, and is useful for accessing the field via an interface. 
+func (v *OrganizationData) GetBillable() bool { return v.Billable } + type PlatformVersionEnum string const ( @@ -3001,12 +3071,16 @@ func (v *__CreateTosAgreementInput) GetProviderName() string { return v.Provider // __DeleteAddOnInput is used internally by genqlient type __DeleteAddOnInput struct { - Name string `json:"name"` + Name string `json:"name"` + Provider string `json:"provider"` } // GetName returns __DeleteAddOnInput.Name, and is useful for accessing the field via an interface. func (v *__DeleteAddOnInput) GetName() string { return v.Name } +// GetProvider returns __DeleteAddOnInput.Provider, and is useful for accessing the field via an interface. +func (v *__DeleteAddOnInput) GetProvider() string { return v.Provider } + // __FlyctlConfigCurrentReleaseInput is used internally by genqlient type __FlyctlConfigCurrentReleaseInput struct { AppName string `json:"appName"` @@ -3345,6 +3419,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -3359,6 +3434,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` @@ -3509,8 +3586,8 @@ func CreateTosAgreement( // The mutation executed by DeleteAddOn. const DeleteAddOn_Operation = ` -mutation DeleteAddOn ($name: String) { - deleteAddOn(input: {name:$name}) { +mutation DeleteAddOn ($name: String, $provider: String) { + deleteAddOn(input: {name:$name,provider:$provider}) { deletedAddOnName } } @@ -3520,12 +3597,14 @@ func DeleteAddOn( ctx_ context.Context, client_ graphql.Client, name string, + provider string, ) (data_ *DeleteAddOnResponse, err_ error) { req_ := &graphql.Request{ OpName: "DeleteAddOn", Query: DeleteAddOn_Operation, Variables: &__DeleteAddOnInput{ - Name: name, + Name: name, + Provider: provider, }, } @@ -3644,6 +3723,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -3658,6 +3738,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` @@ -3755,6 +3837,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -3769,6 +3852,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` @@ -3814,6 +3899,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -3837,6 +3923,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` @@ -3881,6 +3969,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -3895,6 +3984,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` @@ -4007,6 +4098,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } ` diff --git a/gql/genqclient.graphql b/gql/genqclient.graphql index eb88856be6..086521b592 100644 --- a/gql/genqclient.graphql +++ b/gql/genqclient.graphql @@ -140,6 +140,8 @@ fragment OrganizationData on Organization { paidPlan addOnSsoLink provisionsBetaExtensions + name + billable } fragment AppData on App { @@ -147,6 +149,7 @@ fragment AppData on App { name deployed platformVersion + cnameTarget secrets { name } @@ -205,8 +208,8 @@ mutation SetNomadVMCount($input: SetVMCountInput!) 
{ } } -mutation DeleteAddOn($name: String) { - deleteAddOn(input: {name: $name}) { +mutation DeleteAddOn($name: String, $provider: String) { + deleteAddOn(input: {name: $name, provider: $provider}) { deletedAddOnName } } diff --git a/gql/schema.graphql b/gql/schema.graphql index cca661aefc..d3b05f56da 100644 --- a/gql/schema.graphql +++ b/gql/schema.graphql @@ -161,6 +161,7 @@ type AddOn implements Node { """ last: Int ): AppConnection + createdAt: ISO8601DateTime! """ Environment variables for the add-on @@ -237,6 +238,7 @@ type AddOn implements Node { Status of the add-on """ status: String + updatedAt: ISO8601DateTime! } """ @@ -440,6 +442,11 @@ input AddWireGuardPeerInput { """ clientMutationId: String + """ + An ephemeral peer will be destroyed if not validated for too long + """ + ephemeral: Boolean + """ Create peer on specific gateway """ @@ -575,6 +582,49 @@ type AllocateIPAddressPayload { ipAddress: IPAddress } +""" +Autogenerated input type of AllocateManagedServiceIPAddress +""" +input AllocateManagedServiceIPAddressInput { + """ + The application to allocate the ip address for + """ + appId: String! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + + """ + The target network name in the specified organization + """ + network: String + + """ + The organization whose network should be used for private IP allocation + """ + organizationId: String + + """ + The name of the associated service + """ + serviceName: String +} + +""" +Autogenerated return type of AllocateManagedServiceIPAddress. +""" +type AllocateManagedServiceIPAddressPayload { + app: App! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + ipAssignmentId: Int +} + type Allocation implements Node { attachedVolumes( """ @@ -785,6 +835,11 @@ type App implements Node { """ last: Int ): AppChangeConnection! + + """ + Target to use for for CNAME DNS records + """ + cnameTarget: String config: AppConfig! createdAt: ISO8601DateTime! currentLock: AppLock @@ -1149,6 +1204,7 @@ type AppCertificate implements Node { id: ID! isAcmeAlpnConfigured: Boolean! isAcmeDnsConfigured: Boolean! + isAcmeHttpConfigured: Boolean! isApex: Boolean! isConfigured: Boolean! isWildcard: Boolean! @@ -1215,7 +1271,20 @@ type AppCertificateEdge { } type AppCertificateValidationError { + """ + Structured error code + """ + errorCode: CertificateValidationErrorCodeEnum + + """ + Human-readable error message + """ message: String! + + """ + Actionable steps to resolve this error + """ + remediation: String timestamp: ISO8601DateTime! } @@ -1523,6 +1592,7 @@ enum BillingStatus { DELINQUENT PAST_DUE SOURCE_REQUIRED + SUSPENDED TRIAL_ACTIVE TRIAL_ENDED } @@ -1707,30 +1777,10 @@ input BuildMachineInput { """ config: JSON! - """ - [deprecated] - """ - dedicationId: String - """ The flyd ID of the machine """ id: String - - """ - Name of the machine - """ - name: String - - """ - Region for the machine - """ - region: String - - """ - Skip checks for machine creation - """ - skipTrustChecks: Boolean } """ @@ -1742,10 +1792,6 @@ type BuildMachinePayload { """ clientMutationId: String config: JSON! - hostId: Int - name: String! - region: String! - timeout: Int! 
} input BuildStrategyAttemptInput { @@ -1958,6 +2004,76 @@ type CertificateEdge { node: Certificate } +""" +Possible certificate validation error codes +""" +enum CertificateValidationErrorCodeEnum { + """ + CAA records are blocking Let's Encrypt certificate issuance + """ + CAA_RECORD_INVALID + + """ + CAA records for Let's Encrypt are missing Fly's account URI + """ + CAA_RECORD_MISSING_FLY + + """ + DNS records are not configured for this hostname + """ + DNS_NOT_CONFIGURED + + """ + DNS records do not match the expected values + """ + DNS_RECORD_MISMATCH + + """ + Some DNS records are correct, but others do not match the expected values + """ + DNS_RECORD_PARTIAL_MATCH + + """ + DNS resolution timed out + """ + DNS_TIMEOUT + + """ + DNS validation for Let's Encrypt certificate failed + """ + DNS_VALIDATION_FAILED + + """ + Hostname does not match the certificate request + """ + HTTP_HOSTNAME_MISMATCH + + """ + HTTP validation requires IPv6 access to your app + """ + HTTP_IPV6_REQUIRED + + """ + Your app does not have any IPv6 records allocated + """ + IPV6_NOT_ALLOCATED + + """ + No AAAA records were found for your domain + """ + IPV6_NOT_FOUND + + """ + No IP addresses are allocated to this app + """ + NO_ALLOCATED_IPS + + """ + Service exposing port 443 does not have a TLS handler configured + """ + TLS_HANDLER_MISSING +} + """ Autogenerated input type of ChangeOrganizationPlan """ @@ -2048,39 +2164,6 @@ type CheckCertificatePayload { clientMutationId: String } -""" -Autogenerated input type of CheckDomain -""" -input CheckDomainInput { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - Domain name to check - """ - domainName: String! -} - -""" -Autogenerated return type of CheckDomain. -""" -type CheckDomainPayload { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - dnsAvailable: Boolean! - domainName: String! - registrationAvailable: Boolean! - registrationPeriod: Int - registrationPrice: Int - registrationSupported: Boolean! - tld: String! - transferAvailable: Boolean! -} - """ check job http response """ @@ -2674,85 +2757,6 @@ type CreateAddOnPayload { clientMutationId: String } -""" -Autogenerated input type of CreateAndRegisterDomain -""" -input CreateAndRegisterDomainInput { - """ - Enable auto renew on the registration - """ - autoRenew: Boolean - - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - The domain name - """ - name: String! - - """ - The node ID of the organization - """ - organizationId: ID! - - """ - Enable whois privacy on the registration - """ - whoisPrivacy: Boolean -} - -""" -Autogenerated return type of CreateAndRegisterDomain. -""" -type CreateAndRegisterDomainPayload { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - domain: Domain! - organization: Organization! -} - -""" -Autogenerated input type of CreateAndTransferDomain -""" -input CreateAndTransferDomainInput { - """ - The authorization code - """ - authorizationCode: String! - - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - The domain name - """ - name: String! - - """ - The node ID of the organization - """ - organizationId: ID! -} - -""" -Autogenerated return type of CreateAndTransferDomain. -""" -type CreateAndTransferDomainPayload { - """ - A unique identifier for the client performing the mutation. 
- """ - clientMutationId: String - domain: Domain! - organization: Organization! -} - """ Autogenerated input type of CreateApp """ @@ -3145,38 +3149,6 @@ type CreateDoctorUrlPayload { putUrl: String! } -""" -Autogenerated input type of CreateDomain -""" -input CreateDomainInput { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - The domain name - """ - name: String! - - """ - The node ID of the organization - """ - organizationId: ID! -} - -""" -Autogenerated return type of CreateDomain. -""" -type CreateDomainPayload { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - domain: Domain! - organization: Organization! -} - """ Autogenerated input type of CreateExtensionTosAgreement """ @@ -4304,32 +4276,6 @@ type DeleteDeploymentSourcePayload { clientMutationId: String } -""" -Autogenerated input type of DeleteDomain -""" -input DeleteDomainInput { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - The node ID of the domain - """ - domainId: ID! -} - -""" -Autogenerated return type of DeleteDomain. -""" -type DeleteDomainPayload { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - organization: Organization! -} - """ Autogenerated input type of DeleteHealthCheckHandler """ @@ -4423,6 +4369,30 @@ type DeleteLimitedAccessTokenPayload { token: String } +""" +Autogenerated input type of DeleteNetworkPolicy +""" +input DeleteNetworkPolicyInput { + appId: ID! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + id: String! +} + +""" +Autogenerated return type of DeleteNetworkPolicy. +""" +type DeleteNetworkPolicyPayload { + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + ok: Boolean! +} + """ Autogenerated input type of DeleteOrganization """ @@ -4533,6 +4503,30 @@ type DeleteRemoteBuilderPayload { organization: Organization! } +""" +Autogenerated input type of DeleteService +""" +input DeleteServiceInput { + appId: ID! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + name: String! +} + +""" +Autogenerated return type of DeleteService. +""" +type DeleteServicePayload { + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + ok: Boolean! +} + """ Autogenerated input type of DeleteThirdPartyConfiguration """ @@ -6716,6 +6710,12 @@ type Mutations { """ input: AllocateIPAddressInput! ): AllocateIPAddressPayload + allocateManagedServiceIpAddress( + """ + Parameters for AllocateManagedServiceIPAddress + """ + input: AllocateManagedServiceIPAddressInput! + ): AllocateManagedServiceIPAddressPayload attachPostgresCluster( """ Parameters for AttachPostgresCluster @@ -6752,12 +6752,6 @@ type Mutations { """ input: CheckCertificateInput! ): CheckCertificatePayload - checkDomain( - """ - Parameters for CheckDomain - """ - input: CheckDomainInput! - ): CheckDomainPayload configureRegions( """ Parameters for ConfigureRegions @@ -6782,18 +6776,6 @@ type Mutations { """ input: CreateAddOnInput! ): CreateAddOnPayload - createAndRegisterDomain( - """ - Parameters for CreateAndRegisterDomain - """ - input: CreateAndRegisterDomainInput! - ): CreateAndRegisterDomainPayload - createAndTransferDomain( - """ - Parameters for CreateAndTransferDomain - """ - input: CreateAndTransferDomainInput! 
- ): CreateAndTransferDomainPayload createApp( """ Parameters for CreateApp @@ -6849,12 +6831,6 @@ type Mutations { input: CreateDoctorReportInput! ): CreateDoctorReportPayload createDoctorUrl: CreateDoctorUrlPayload - createDomain( - """ - Parameters for CreateDomain - """ - input: CreateDomainInput! - ): CreateDomainPayload createExtensionTosAgreement( """ Parameters for CreateExtensionTosAgreement @@ -7008,12 +6984,6 @@ type Mutations { """ input: DeleteDNSRecordInput! ): DeleteDNSRecordPayload - deleteDomain( - """ - Parameters for DeleteDomain - """ - input: DeleteDomainInput! - ): DeleteDomainPayload deleteHealthCheckHandler( """ Parameters for DeleteHealthCheckHandler @@ -7032,6 +7002,12 @@ type Mutations { """ input: DeleteLimitedAccessTokenInput! ): DeleteLimitedAccessTokenPayload + deleteNetworkPolicy( + """ + Parameters for DeleteNetworkPolicy + """ + input: DeleteNetworkPolicyInput! + ): DeleteNetworkPolicyPayload deleteOrganization( """ Parameters for DeleteOrganization @@ -7056,6 +7032,12 @@ type Mutations { """ input: DeleteRemoteBuilderInput! ): DeleteRemoteBuilderPayload + deleteService( + """ + Parameters for DeleteService + """ + input: DeleteServiceInput! + ): DeleteServicePayload deleteThirdPartyConfiguration( """ Parameters for DeleteThirdPartyConfiguration @@ -7293,18 +7275,24 @@ type Mutations { """ input: RedeemOrganizationInvitationInput! ): RedeemOrganizationInvitationPayload - registerDomain( - """ - Parameters for RegisterDomain - """ - input: RegisterDomainInput! - ): RegisterDomainPayload registerMachine( """ Parameters for RegisterMachine """ input: RegisterMachineInput! ): RegisterMachinePayload + registerNetworkPolicy( + """ + Parameters for RegisterNetworkPolicy + """ + input: RegisterNetworkPolicyInput! + ): RegisterNetworkPolicyPayload + registerService( + """ + Parameters for RegisterService + """ + input: RegisterServiceInput! + ): RegisterServicePayload registerVolume( """ Parameters for RegisterVolume @@ -8717,6 +8705,11 @@ type Queries { Returns the latest available tag for a given image repository """ latestImageDetails( + """ + Fly version to use for tag resolution + """ + flyVersion: String + """ /: """ @@ -8930,42 +8923,6 @@ type RegionPlacement { region: String! } -""" -Autogenerated input type of RegisterDomain -""" -input RegisterDomainInput { - """ - Enable auto renew on the registration - """ - autoRenew: Boolean - - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - - """ - The node ID of the domain - """ - domainId: ID! - - """ - Enable whois privacy on the registration - """ - whoisPrivacy: Boolean -} - -""" -Autogenerated return type of RegisterDomain. -""" -type RegisterDomainPayload { - """ - A unique identifier for the client performing the mutation. - """ - clientMutationId: String - domain: Domain! -} - """ Autogenerated input type of RegisterMachine """ @@ -9037,6 +8994,61 @@ type RegisterMachinePayload { id: ID! } +""" +Autogenerated input type of RegisterNetworkPolicy +""" +input RegisterNetworkPolicyInput { + appId: ID! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + config: String! + id: String! + name: String! + updatedAt: String! +} + +""" +Autogenerated return type of RegisterNetworkPolicy. +""" +type RegisterNetworkPolicyPayload { + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + ok: Boolean! 
+} + +""" +Autogenerated input type of RegisterService +""" +input RegisterServiceInput { + appId: ID! + + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + config: String! + id: String! + kind: String! + name: String! + updatedAt: String! +} + +""" +Autogenerated return type of RegisterService. +""" +type RegisterServicePayload { + """ + A unique identifier for the client performing the mutation. + """ + clientMutationId: String + ok: Boolean! +} + """ Autogenerated input type of RegisterVolume """ diff --git a/installers/install.ps1 b/installers/install.ps1 deleted file mode 100644 index a8dec6fdf0..0000000000 --- a/installers/install.ps1 +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env pwsh -# Copyright 2018 the Deno authors. All rights reserved. MIT license. -# TODO(everyone): Keep this script simple and easily auditable. - -$ErrorActionPreference = 'Stop' - -$Version = if ($v) { - $v -} elseif ($args.Length -eq 1) { - $args.Get(0) -} else { - "latest" -} - -$FlyInstall = $env:FLYCTL_INSTALL -$BinDir = if ($FlyInstall) { - "$FlyInstall\bin" -} else { - "$Home\.fly\bin" -} - -$FlyZip = "$BinDir\flyctl.zip" -$FlyctlExe = "$BinDir\flyctl.exe" -$WintunDll = "$BinDir\wintun.dll" -$FlyExe = "$BinDir\fly.exe" - -# Fly & GitHub require TLS 1.2 -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - -try { - $Response = Invoke-WebRequest "https://api.fly.io/app/flyctl_releases/windows/x86_64/$Version" -UseBasicParsing - $FlyUri = $Response.Content -} -catch { - $StatusCode = $_.Exception.Response.StatusCode.value__ - if ($StatusCode -eq 404) { - Write-Error "Unable to find a flyctl release on GitHub for version:$Version - see github.com/superfly/flyctl/releases for all versions" - } else { - $Request = $_.Exception - Write-Error "Error while fetching releases: $Request" - } - Exit 1 -} - -if (!(Test-Path $BinDir)) { - New-Item $BinDir -ItemType Directory | Out-Null -} - -$prevProgressPreference = $ProgressPreference -try { - # Invoke-WebRequest on older powershell versions has severe transfer - # performance issues due to progress bar rendering - the screen updates - # end up throttling the download itself. Disable progress on these older - # versions. - if ($PSVersionTable.PSVersion.Major -lt 7) { - Write-Output "Downloading flyctl..." - $ProgressPreference = "SilentlyContinue" - } - - Invoke-WebRequest $FlyUri -OutFile $FlyZip -UseBasicParsing -} finally { - $ProgressPreference = $prevProgressPreference -} - -if (Get-Command Expand-Archive -ErrorAction SilentlyContinue) { - Expand-Archive $FlyZip -Destination $BinDir -Force -} else { - Remove-Item $FlyctlExe -ErrorAction SilentlyContinue - Remove-Item $FlyExe -ErrorAction SilentlyContinue - Remove-Item $WintunDll -ErrorAction SilentlyContinue - Add-Type -AssemblyName System.IO.Compression.FileSystem - [IO.Compression.ZipFile]::ExtractToDirectory($FlyZip, $BinDir) -} - -Remove-Item $FlyZip - -$User = [EnvironmentVariableTarget]::User -$Path = [Environment]::GetEnvironmentVariable('Path', $User) -if (!(";$Path;".ToLower() -like "*;$BinDir;*".ToLower())) { - [Environment]::SetEnvironmentVariable('Path', "$Path;$BinDir", $User) - $Env:Path += ";$BinDir" -} - -if (!(Get-Item $FlyExe -ErrorAction SilentlyContinue).LinkTarget) { - # if fly.exe is not already a symlink, make it so. 
- - # delete any existing file - Remove-Item $FlyExe -ErrorAction SilentlyContinue - - # creating symlinks on windows requires administrator privileges by default, - # passing `-Verb runAs` means we'll pop up a UAC dialog here - Start-Process -FilePath "$env:comspec" -ArgumentList "/c", "mklink", $FlyExe, $FlyctlExe -Verb runAs -WorkingDirectory "$env:windir" -} - -Write-Output "flyctl was installed successfully to $FlyctlExe" -Write-Output "Run 'flyctl --help' to get started" diff --git a/installers/install.sh b/installers/install.sh deleted file mode 100644 index 219952f3a3..0000000000 --- a/installers/install.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/sh -# Based on Deno installer: Copyright 2019 the Deno authors. All rights reserved. MIT license. -# TODO(everyone): Keep this script simple and easily auditable. - -set -e - -main() { - os=$(uname -s) - arch=$(uname -m) - version=${1:-latest} - - flyctl_uri=$(curl -s ${FLY_FORCE_TRACE:+ -H "Fly-Force-Trace: $FLY_FORCE_TRACE"} https://api.fly.io/app/flyctl_releases/$os/$arch/$version) - if [ ! "$flyctl_uri" ]; then - echo "Error: Unable to find a flyctl release for $os/$arch/$version - see github.com/superfly/flyctl/releases for all versions" 1>&2 - exit 1 - fi - - flyctl_install="${FLYCTL_INSTALL:-$HOME/.fly}" - - bin_dir="$flyctl_install/bin" - tmp_dir="$flyctl_install/tmp" - exe="$bin_dir/flyctl" - simexe="$bin_dir/fly" - - mkdir -p "$bin_dir" - mkdir -p "$tmp_dir" - - curl -q --fail --location --progress-bar --output "$tmp_dir/flyctl.tar.gz" "$flyctl_uri" - # extract to tmp dir so we don't open existing executable file for writing: - tar -C "$tmp_dir" -xzf "$tmp_dir/flyctl.tar.gz" - chmod +x "$tmp_dir/flyctl" - # atomically rename into place: - mv "$tmp_dir/flyctl" "$exe" - rm "$tmp_dir/flyctl.tar.gz" - - ln -sf $exe $simexe - - if [ "${1}" = "prerel" ] || [ "${1}" = "pre" ]; then - "$exe" version -s "shell-prerel" - else - "$exe" version -s "shell" - fi - - echo "flyctl was installed successfully to $exe" - if command -v flyctl >/dev/null; then - echo "Run 'flyctl --help' to get started" - else - case $SHELL in - /bin/zsh) shell_profile=".zshrc" ;; - *) shell_profile=".bash_profile" ;; - esac - echo "Manually add the directory to your \$HOME/$shell_profile (or similar)" - echo " export FLYCTL_INSTALL=\"$flyctl_install\"" - echo " export PATH=\"\$FLYCTL_INSTALL/bin:\$PATH\"" - echo "Run '$exe --help' to get started" - fi -} - -main "$1" diff --git a/internal/appconfig/config.go b/internal/appconfig/config.go index 9c3a3c6172..b0fc2370f2 100644 --- a/internal/appconfig/config.go +++ b/internal/appconfig/config.go @@ -3,14 +3,18 @@ package appconfig import ( + "context" "encoding/base64" "fmt" "net/url" "os" + "path/filepath" "reflect" "slices" fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/launchdarkly" ) const ( @@ -18,6 +22,14 @@ const ( DefaultConfigFileName = "fly.toml" ) +// Well-known docker compose filenames in order of preference +var WellKnownComposeFilenames = []string{ + "compose.yaml", + "compose.yml", + "docker-compose.yaml", + "docker-compose.yml", +} + type RestartPolicy string const ( @@ -58,6 +70,15 @@ type Config struct { Files []File `toml:"files,omitempty" json:"files,omitempty"` HostDedicationID string `toml:"host_dedication_id,omitempty" json:"host_dedication_id,omitempty"` + // Pilot Container support: configuration, including the set of containers, can either + // be specified in a separate file or in the fly.toml file itself. 
If containers are
+	// defined, one container can be identified as the "app" container, which is
+	// the one where the image is replaced upon deploy. If no container is identified,
+	// this will default to the "app" container, and if that is not present, the first
+	// container in the list will be used.
+	MachineConfig string `toml:"machine_config,omitempty" json:"machine_config,omitempty"`
+	Container string `toml:"container,omitempty" json:"container,omitempty"`
+
 	MachineChecks []*ServiceMachineCheck `toml:"machine_checks,omitempty" json:"machine_checks,omitempty"`
 
 	Restart []Restart `toml:"restart,omitempty" json:"restart,omitempty"`
@@ -93,6 +114,7 @@ type Deploy struct {
 	ReleaseCommand string `toml:"release_command,omitempty" json:"release_command,omitempty"`
 	ReleaseCommandTimeout *fly.Duration `toml:"release_command_timeout,omitempty" json:"release_command_timeout,omitempty"`
 	ReleaseCommandCompute *Compute `toml:"release_command_vm,omitempty" json:"release_command_vm,omitempty"`
+	SeedCommand string `toml:"seed_command,omitempty" json:"seed_command,omitempty"`
 }
 
 type File struct {
@@ -136,12 +158,17 @@ type Mount struct {
 	Destination string `toml:"destination,omitempty" json:"destination,omitempty"`
 	InitialSize string `toml:"initial_size,omitempty" json:"initial_size,omitempty"`
 	SnapshotRetention *int `toml:"snapshot_retention,omitempty" json:"snapshot_retention,omitempty"`
+	ScheduledSnapshots *bool `toml:"scheduled_snapshots,omitempty" json:"scheduled_snapshots,omitempty"`
 	AutoExtendSizeThreshold int `toml:"auto_extend_size_threshold,omitempty" json:"auto_extend_size_threshold,omitempty"`
 	AutoExtendSizeIncrement string `toml:"auto_extend_size_increment,omitempty" json:"auto_extend_size_increment,omitempty"`
 	AutoExtendSizeLimit string `toml:"auto_extend_size_limit,omitempty" json:"auto_extend_size_limit,omitempty"`
 	Processes []string `toml:"processes,omitempty" json:"processes,omitempty"`
 }
 
+type BuildCompose struct {
+	File string `toml:"file,omitempty" json:"file,omitempty"`
+}
+
 type Build struct {
 	Builder string `toml:"builder,omitempty" json:"builder,omitempty"`
 	Args map[string]string `toml:"args,omitempty" json:"args,omitempty"`
@@ -152,6 +179,9 @@ type Build struct {
 	Dockerfile string `toml:"dockerfile,omitempty" json:"dockerfile,omitempty"`
 	Ignorefile string `toml:"ignorefile,omitempty" json:"ignorefile,omitempty"`
 	DockerBuildTarget string `toml:"build-target,omitempty" json:"build-target,omitempty"`
+	Compose *BuildCompose `toml:"compose,omitempty" json:"compose,omitempty"`
+	Compression string `toml:"compression,omitempty" json:"compression,omitempty"`
+	CompressionLevel *int `toml:"compression_level,omitempty" json:"compression_level,omitempty"`
 }
 
 type Experimental struct {
@@ -164,7 +194,6 @@ type Experimental struct {
 	LazyLoadImages bool `toml:"lazy_load_images,omitempty" json:"lazy_load_images,omitempty"`
 	Attached Attached `toml:"attached,omitempty" json:"attached,omitempty"`
 	MachineConfig string `toml:"machine_config,omitempty" json:"machine_config,omitempty"`
-	UseZstd bool `toml:"use_zstd,omitempty" json:"use_zstd,omitempty"`
 }
 
 type Attached struct {
@@ -223,6 +252,41 @@ func (c *Config) DetermineIPType(ipType string) string {
 	return "shared"
 }
 
+func (c *Config) DetermineCompression(ctx context.Context) (compression string, compressionLevel int) {
+	// Set default values
+	compression = "gzip"
+	compressionLevel = 7
+
+	// LaunchDarkly provides the base settings
+	ldClient := launchdarkly.ClientFromContext(ctx)
+	if ldClient.UseZstdEnabled() {
+		compression = 
"zstd" + } + if strength, ok := ldClient.GetCompressionStrength().(float64); ok { + compressionLevel = int(strength) + } + + // fly.toml overrides LaunchDarkly + if c.Build != nil { + if c.Build.Compression != "" { + compression = c.Build.Compression + } + if c.Build.CompressionLevel != nil { + compressionLevel = *c.Build.CompressionLevel + } + } + + // CLI flags override everything + if flag.IsSpecified(ctx, "compression") { + compression = flag.GetString(ctx, "compression") + } + if flag.IsSpecified(ctx, "compression-level") { + compressionLevel = flag.GetInt(ctx, "compression-level") + } + + return +} + // IsUsingGPU returns true if any VMs have a gpu-kind set. func (c *Config) IsUsingGPU() bool { for _, vm := range c.Compute { @@ -274,48 +338,48 @@ func (c *Config) InternalPort() int { return 0 } -func (cfg *Config) BuildStrategies() []string { +func (c *Config) BuildStrategies() []string { strategies := []string{} - if cfg == nil || cfg.Build == nil { + if c == nil || c.Build == nil { return strategies } - if cfg.Build.Image != "" { - strategies = append(strategies, fmt.Sprintf("the \"%s\" docker image", cfg.Build.Image)) + if c.Build.Image != "" { + strategies = append(strategies, fmt.Sprintf("the \"%s\" docker image", c.Build.Image)) } - if cfg.Build.Builder != "" || len(cfg.Build.Buildpacks) > 0 { + if c.Build.Builder != "" || len(c.Build.Buildpacks) > 0 { strategies = append(strategies, "a buildpack") } - if cfg.Build.Dockerfile != "" || cfg.Build.DockerBuildTarget != "" { - if cfg.Build.Dockerfile != "" { - strategies = append(strategies, fmt.Sprintf("the \"%s\" dockerfile", cfg.Build.Dockerfile)) + if c.Build.Dockerfile != "" || c.Build.DockerBuildTarget != "" { + if c.Build.Dockerfile != "" { + strategies = append(strategies, fmt.Sprintf("the \"%s\" dockerfile", c.Build.Dockerfile)) } else { strategies = append(strategies, "a dockerfile") } } - if cfg.Build.Builtin != "" { - strategies = append(strategies, fmt.Sprintf("the \"%s\" builtin image", cfg.Build.Builtin)) + if c.Build.Builtin != "" { + strategies = append(strategies, fmt.Sprintf("the \"%s\" builtin image", c.Build.Builtin)) } return strategies } -func (cfg *Config) URL() *url.URL { +func (c *Config) URL() *url.URL { u := &url.URL{ Scheme: "https", - Host: cfg.AppName + ".fly.dev", + Host: c.AppName + ".fly.dev", Path: "/", } // HTTPService always listen on https, even if ForceHTTPS is false - if cfg.HTTPService != nil && cfg.HTTPService.InternalPort > 0 { + if c.HTTPService != nil && c.HTTPService.InternalPort > 0 { return u } var httpPorts []int var httpsPorts []int - for _, service := range cfg.Services { + for _, service := range c.Services { for _, port := range service.Ports { if port.Port == nil || !slices.Contains(port.Handlers, "http") { continue @@ -350,10 +414,10 @@ func (cfg *Config) URL() *url.URL { // MergeFiles merges the provided files with the files in the config wherein the provided files // take precedence. -func (cfg *Config) MergeFiles(files []*fly.File) error { +func (c *Config) MergeFiles(files []*fly.File) error { // First convert the Config files to Machine files. - cfgFiles := make([]*fly.File, 0, len(cfg.Files)) - for _, f := range cfg.Files { + cfgFiles := make([]*fly.File, 0, len(c.Files)) + for _, f := range c.Files { machineFile, err := f.toMachineFile() if err != nil { return err @@ -368,14 +432,36 @@ func (cfg *Config) MergeFiles(files []*fly.File) error { fly.MergeFiles(mConfig, files) // Persist the merged files back to the config to be used later for deploying. 
- cfg.MergedFiles = mConfig.Files + c.MergedFiles = mConfig.Files return nil } -func (cfg *Config) DeployStrategy() string { - if cfg.Deploy == nil { +func (c *Config) DeployStrategy() string { + if c.Deploy == nil { return "" } - return cfg.Deploy.Strategy + return c.Deploy.Strategy +} + +// DetectComposeFile returns Build.Compose.File if set, otherwise looks for +// well-known compose filenames in the directory containing the config file. +// Returns the first found filename or empty string. +func (c *Config) DetectComposeFile() string { + // If compose file is explicitly set, return it + if c.Build != nil && c.Build.Compose != nil && c.Build.Compose.File != "" { + return c.Build.Compose.File + } + + // Otherwise, detect well-known filenames + configDir := filepath.Dir(c.configFilePath) + + for _, filename := range WellKnownComposeFilenames { + path := filepath.Join(configDir, filename) + if _, err := os.Stat(path); err == nil { + return filename + } + } + + return "" } diff --git a/internal/appconfig/context.go b/internal/appconfig/context.go index 78eaa0f4ad..dcfca056da 100644 --- a/internal/appconfig/context.go +++ b/internal/appconfig/context.go @@ -10,6 +10,7 @@ const ( _ contextKeyType = iota configContextKey nameContextKey + seedContextKey ) // WithConfig derives a context that carries cfg from ctx. @@ -39,3 +40,17 @@ func NameFromContext(ctx context.Context) string { return "" } + +// WithSeed derives a context that carries the given seed from ctx. +func WithSeedCommand(ctx context.Context, seedCommand string) context.Context { + return context.WithValue(ctx, seedContextKey, seedCommand) +} + +// SeedFromContext returns the seed ctx carries or an empty string. +func SeedCommandFromContext(ctx context.Context) string { + if seed, ok := ctx.Value(seedContextKey).(string); ok { + return seed + } + + return "" +} diff --git a/internal/appconfig/definition_test.go b/internal/appconfig/definition_test.go index d2a8047aff..d87799189e 100644 --- a/internal/appconfig/definition_test.go +++ b/internal/appconfig/definition_test.go @@ -271,15 +271,13 @@ func TestToDefinition(t *testing.T) { }, "metrics": []any{ map[string]any{ - "port": int64(9999), - "path": "/metrics", - "https": false, + "port": int64(9999), + "path": "/metrics", }, map[string]any{ "port": int64(9998), "path": "/metrics", "processes": []any{"web"}, - "https": false, }, }, "statics": []any{ @@ -306,10 +304,11 @@ func TestToDefinition(t *testing.T) { }, }, "mounts": []any{map[string]any{ - "source": "data", - "destination": "/data", - "initial_size": "30gb", - "snapshot_retention": int64(17), + "source": "data", + "destination": "/data", + "initial_size": "30gb", + "snapshot_retention": int64(17), + "scheduled_snapshots": true, }}, "processes": map[string]any{ "web": "run web", diff --git a/internal/appconfig/machines.go b/internal/appconfig/machines.go index 77ceb13530..c064bb02df 100644 --- a/internal/appconfig/machines.go +++ b/internal/appconfig/machines.go @@ -1,11 +1,7 @@ package appconfig import ( - "encoding/json" "fmt" - "io" - "os" - "strings" "github.com/docker/go-units" "github.com/google/shlex" @@ -14,6 +10,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/buildinfo" + "github.com/superfly/flyctl/internal/containerconfig" ) func (c *Config) ToMachineConfig(processGroup string, src *fly.MachineConfig) (*fly.MachineConfig, error) { @@ -242,7 +239,7 @@ func (c *Config) ToConsoleMachineConfig() (*fly.MachineConfig, error) { // 
updateMachineConfig applies configuration options from the optional MachineConfig passed in, then the base config, into a new MachineConfig func (c *Config) updateMachineConfig(src *fly.MachineConfig) (*fly.MachineConfig, error) { - // For flattened app configs there is only one proces name and it is the group it was flattened for + // For flattened app configs there is only one process name and it is the group it was flattened for processGroup := c.DefaultProcessName() mConfig := &fly.MachineConfig{} @@ -250,28 +247,24 @@ func (c *Config) updateMachineConfig(src *fly.MachineConfig) (*fly.MachineConfig mConfig = helpers.Clone(src) } + // Extract machine config from fly.toml + var appMachineConfig string if c.Experimental != nil && len(c.Experimental.MachineConfig) > 0 { - emc := c.Experimental.MachineConfig - var buf []byte - switch { - case strings.HasPrefix(emc, "{"): - buf = []byte(emc) - case strings.HasSuffix(emc, ".json"): - fo, err := os.Open(emc) - if err != nil { - return nil, err - } - buf, err = io.ReadAll(fo) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("invalid machine config source: %q", emc) - } + appMachineConfig = c.Experimental.MachineConfig + } - if err := json.Unmarshal(buf, mConfig); err != nil { - return nil, fmt.Errorf("invalid machine config %q: %w", emc, err) - } + if appMachineConfig == "" { + appMachineConfig = c.MachineConfig + } + + // Parse container configuration (machine config or compose file) directly into mConfig + composePath := "" + if c.Build != nil && c.Build.Compose != nil { + // DetectComposeFile returns the explicit file if set, otherwise auto-detects + composePath = c.DetectComposeFile() + } + if err := containerconfig.ParseContainerConfig(mConfig, composePath, appMachineConfig, c.ConfigFilePath(), c.Container); err != nil { + return nil, err } // Metrics diff --git a/internal/appconfig/patches.go b/internal/appconfig/patches.go index f618b19e57..d4018e2369 100644 --- a/internal/appconfig/patches.go +++ b/internal/appconfig/patches.go @@ -167,6 +167,7 @@ func patchBuild(cfg map[string]any) (map[string]any, error) { switch k { case "build_target": cast["build-target"] = v + delete(cast, "build_target") } } @@ -240,6 +241,8 @@ func patchCompute(cfg map[string]any) (map[string]any, error) { } } cfg["vm"] = compute + delete(cfg, "compute") + delete(cfg, "computes") return cfg, nil } @@ -261,6 +264,7 @@ func patchMounts(cfg map[string]any) (map[string]any, error) { } } cfg["mounts"] = mounts + delete(cfg, "mount") return cfg, nil } @@ -276,6 +280,7 @@ func patchMetrics(cfg map[string]any) (map[string]any, error) { } } cfg["metrics"] = metrics + delete(cfg, "metric") return cfg, nil } diff --git a/internal/appconfig/serde.go b/internal/appconfig/serde.go index b1fdf5a88b..c32d3fc7b7 100644 --- a/internal/appconfig/serde.go +++ b/internal/appconfig/serde.go @@ -18,7 +18,7 @@ import ( "github.com/pelletier/go-toml/v2" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/iostreams" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) const flyConfigHeader = `# fly.%s app configuration file generated for %s on %s @@ -54,6 +54,32 @@ func LoadConfig(path string) (cfg *Config, err error) { return cfg, nil } +// LoadConfigAsMap loads the config as a map, which is useful for strict validation. 
+func LoadConfigAsMap(path string) (rawConfig map[string]any, err error) { + buf, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + // First unmarshal to get raw config map + rawConfig = map[string]any{} + if strings.HasSuffix(path, ".json") { + err = json.Unmarshal(buf, &rawConfig) + } else if strings.HasSuffix(path, ".yaml") { + err = yaml.Unmarshal(buf, &rawConfig) + if err == nil { + stringifyYAMLMapKeys(rawConfig) + } + } else { + err = toml.Unmarshal(buf, &rawConfig) + } + if err != nil { + return nil, err + } + + return patchRoot(rawConfig) +} + func (c *Config) WriteTo(w io.Writer, format string) (int64, error) { var b []byte var err error diff --git a/internal/appconfig/serde_test.go b/internal/appconfig/serde_test.go index 79456c7155..7aff9be484 100644 --- a/internal/appconfig/serde_test.go +++ b/internal/appconfig/serde_test.go @@ -43,6 +43,91 @@ func TestLoadTOMLAppConfigWithDockerfile(t *testing.T) { assert.Equal(t, p.Build.Dockerfile, "./Dockerfile") } +func TestLoadTOMLAppConfigWithCompose(t *testing.T) { + const path = "./testdata/compose.toml" + + p, err := LoadConfig(path) + require.NoError(t, err) + require.NotNil(t, p.Build) + require.NotNil(t, p.Build.Compose) + assert.Equal(t, p.Build.Compose.File, "docker-compose.yml") +} + +func TestLoadTOMLAppConfigWithComposeAutoDetect(t *testing.T) { + const path = "./testdata/compose-autodetect.toml" + + // Create a temporary compose.yaml file in the testdata directory + composeFile := "./testdata/compose.yaml" + err := os.WriteFile(composeFile, []byte("version: '3'\nservices:\n app:\n image: test\n"), 0644) + require.NoError(t, err) + defer os.Remove(composeFile) + + p, err := LoadConfig(path) + require.NoError(t, err) + require.NotNil(t, p.Build) + require.NotNil(t, p.Build.Compose) + assert.Equal(t, p.Build.Compose.File, "") // File is empty in config + + // Test the detection + detected := p.DetectComposeFile() + assert.Equal(t, "compose.yaml", detected) +} + +func TestDetectComposeFileWithExplicitFile(t *testing.T) { + const path = "./testdata/compose.toml" + + p, err := LoadConfig(path) + require.NoError(t, err) + require.NotNil(t, p.Build) + require.NotNil(t, p.Build.Compose) + + // When file is explicitly set, DetectComposeFile should return it + detected := p.DetectComposeFile() + assert.Equal(t, "docker-compose.yml", detected) +} + +func TestDetectComposeFile(t *testing.T) { + tmpDir := t.TempDir() + configPath := filepath.Join(tmpDir, "fly.toml") + + // Write a minimal config + err := os.WriteFile(configPath, []byte(`app = "test"`), 0644) + require.NoError(t, err) + + config, err := LoadConfig(configPath) + require.NoError(t, err) + + // Test each well-known filename + for _, filename := range WellKnownComposeFilenames { + t.Run(filename, func(t *testing.T) { + // Remove any existing compose files + for _, f := range WellKnownComposeFilenames { + os.Remove(filepath.Join(tmpDir, f)) + } + + // Create the test file + composePath := filepath.Join(tmpDir, filename) + err := os.WriteFile(composePath, []byte("test"), 0644) + require.NoError(t, err) + + // Test detection + detected := config.DetectComposeFile() + assert.Equal(t, filename, detected) + }) + } + + // Test when no compose file exists + t.Run("no compose file", func(t *testing.T) { + // Remove all compose files + for _, f := range WellKnownComposeFilenames { + os.Remove(filepath.Join(tmpDir, f)) + } + + detected := config.DetectComposeFile() + assert.Empty(t, detected) + }) +} + func TestLoadTOMLAppConfigWithBuilderNameAndArgs(t *testing.T) { 
const path = "./testdata/build-with-args.toml" @@ -507,10 +592,11 @@ func TestLoadTOMLAppConfigReferenceFormat(t *testing.T) { }, Mounts: []Mount{{ - Source: "data", - Destination: "/data", - InitialSize: "30gb", - SnapshotRetention: fly.Pointer(17), + Source: "data", + Destination: "/data", + InitialSize: "30gb", + SnapshotRetention: fly.Pointer(17), + ScheduledSnapshots: fly.BoolPointer(true), }}, Processes: map[string]string{ diff --git a/internal/appconfig/service.go b/internal/appconfig/service.go index 268cdb4f1f..f558f87ea5 100644 --- a/internal/appconfig/service.go +++ b/internal/appconfig/service.go @@ -121,8 +121,8 @@ func (svc *Service) toMachineService() *fly.MachineService { return s } -func (chk *ServiceHTTPCheck) toMachineCheck() *fly.MachineCheck { - return &fly.MachineCheck{ +func (chk *ServiceHTTPCheck) toMachineCheck() *fly.MachineServiceCheck { + return &fly.MachineServiceCheck{ Type: fly.Pointer("http"), Interval: chk.Interval, Timeout: chk.Timeout, @@ -143,8 +143,8 @@ func (chk *ServiceHTTPCheck) String(port int) string { return fmt.Sprintf("http-%d-%v", port, chk.HTTPMethod) } -func (chk *ServiceTCPCheck) toMachineCheck() *fly.MachineCheck { - return &fly.MachineCheck{ +func (chk *ServiceTCPCheck) toMachineCheck() *fly.MachineServiceCheck { + return &fly.MachineServiceCheck{ Type: fly.Pointer("tcp"), Interval: chk.Interval, Timeout: chk.Timeout, @@ -185,7 +185,7 @@ func serviceFromMachineService(ctx context.Context, ms fly.MachineService, proce } } -func tcpCheckFromMachineCheck(mc fly.MachineCheck) *ServiceTCPCheck { +func tcpCheckFromMachineCheck(mc fly.MachineServiceCheck) *ServiceTCPCheck { return &ServiceTCPCheck{ Interval: mc.Interval, Timeout: mc.Timeout, @@ -193,7 +193,7 @@ func tcpCheckFromMachineCheck(mc fly.MachineCheck) *ServiceTCPCheck { } } -func httpCheckFromMachineCheck(ctx context.Context, mc fly.MachineCheck) *ServiceHTTPCheck { +func httpCheckFromMachineCheck(ctx context.Context, mc fly.MachineServiceCheck) *ServiceHTTPCheck { headers := make(map[string]string) for _, h := range mc.HTTPHeaders { if len(h.Values) > 0 { diff --git a/internal/appconfig/setters.go b/internal/appconfig/setters.go index f626fca6d3..8b497c3ff3 100644 --- a/internal/appconfig/setters.go +++ b/internal/appconfig/setters.go @@ -19,34 +19,24 @@ func (c *Config) SetInternalPort(port int) { } func (c *Config) SetHttpCheck(path string, headers map[string]string) { + check := &ServiceHTTPCheck{ + HTTPMethod: fly.StringPointer("GET"), + HTTPPath: fly.StringPointer(path), + HTTPProtocol: fly.StringPointer("http"), + HTTPTLSSkipVerify: fly.BoolPointer(false), + Interval: &fly.Duration{Duration: 10 * time.Second}, + Timeout: &fly.Duration{Duration: 2 * time.Second}, + GracePeriod: &fly.Duration{Duration: 5 * time.Second}, + HTTPHeaders: headers, + } + switch { case c.HTTPService != nil: - if c.Checks == nil { - c.Checks = make(map[string]*ToplevelCheck) - } - c.Checks["status"] = &ToplevelCheck{ - Port: fly.Pointer(c.HTTPService.InternalPort), - Type: fly.Pointer("http"), - HTTPMethod: fly.StringPointer("GET"), - HTTPPath: fly.StringPointer(path), - HTTPProtocol: fly.StringPointer("http"), - HTTPTLSSkipVerify: fly.BoolPointer(false), - Interval: &fly.Duration{Duration: 10 * time.Second}, - Timeout: &fly.Duration{Duration: 2 * time.Second}, - GracePeriod: &fly.Duration{Duration: 5 * time.Second}, - HTTPHeaders: headers, - } + service := c.HTTPService + service.HTTPChecks = append(service.HTTPChecks, check) case len(c.Services) > 0: service := &c.Services[0] - service.HTTPChecks = 
append(service.HTTPChecks, &ServiceHTTPCheck{ - HTTPMethod: fly.StringPointer("GET"), - HTTPPath: fly.StringPointer(path), - HTTPProtocol: fly.StringPointer("http"), - HTTPTLSSkipVerify: fly.BoolPointer(false), - Interval: &fly.Duration{Duration: 10 * time.Second}, - Timeout: &fly.Duration{Duration: 2 * time.Second}, - GracePeriod: &fly.Duration{Duration: 5 * time.Second}, - }) + service.HTTPChecks = append(service.HTTPChecks, check) } } diff --git a/internal/appconfig/setters_test.go b/internal/appconfig/setters_test.go index c635c9dbcd..65363a3c04 100644 --- a/internal/appconfig/setters_test.go +++ b/internal/appconfig/setters_test.go @@ -52,11 +52,7 @@ func TestSettersWithHTTPService(t *testing.T) { HardLimit: 34, SoftLimit: 12, }, - }) - assert.Equal(t, cfg.Checks, map[string]*ToplevelCheck{ - "status": { - Port: fly.Pointer(1234), - Type: fly.Pointer("http"), + HTTPChecks: []*ServiceHTTPCheck{{ Interval: fly.MustParseDuration("10s"), Timeout: fly.MustParseDuration("2s"), GracePeriod: fly.MustParseDuration("5s"), @@ -64,7 +60,7 @@ func TestSettersWithHTTPService(t *testing.T) { HTTPPath: fly.Pointer("/status"), HTTPProtocol: fly.Pointer("http"), HTTPTLSSkipVerify: fly.Pointer(false), - }, + }}, }) } diff --git a/internal/appconfig/strict_validate.go b/internal/appconfig/strict_validate.go new file mode 100644 index 0000000000..0509129b4a --- /dev/null +++ b/internal/appconfig/strict_validate.go @@ -0,0 +1,272 @@ +package appconfig + +import ( + "fmt" + "reflect" + "strings" + + io "github.com/superfly/flyctl/iostreams" +) + +// StrictValidateResult contains the results of strict validation +type StrictValidateResult struct { + UnrecognizedSections []string + UnrecognizedKeys map[string][]string // section -> keys +} + +// StrictValidate performs strict validation on a raw configuration map +// by checking for unrecognized sections and keys using reflection on the Config type +func StrictValidate(rawConfig map[string]any) *StrictValidateResult { + result := &StrictValidateResult{ + UnrecognizedSections: []string{}, + UnrecognizedKeys: make(map[string][]string), + } + + recognizedFields := getFields(reflect.TypeOf(Config{})) + + // Check each key in the raw config + for key, value := range rawConfig { + fieldInfo, recognized := recognizedFields[key] + if !recognized { + result.UnrecognizedSections = append(result.UnrecognizedSections, key) + continue + } + + // If this is a map or section, check its nested keys + if fieldInfo.isNested && value != nil { + validateNestedSection(key, value, fieldInfo.fieldType, result) + } + } + + return result +} + +// fieldInfo stores information about a struct field +type fieldInfo struct { + fieldType reflect.Type + isNested bool +} + +// getFields extracts all recognized field names from struct tags +func getFields(t reflect.Type) map[string]fieldInfo { + fields := make(map[string]fieldInfo) + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + // Skip unexported fields + if !field.IsExported() { + continue + } + + // Check for toml tag first, then json tag + tomlTag := field.Tag.Get("toml") + jsonTag := field.Tag.Get("json") + + // Skip fields marked with "-" + if tomlTag == "-" || jsonTag == "-" { + continue + } + + // Parse tag to get field name + var fieldName string + if tomlTag != "" { + fieldName = strings.Split(tomlTag, ",")[0] + } else if jsonTag != "" { + fieldName = strings.Split(jsonTag, ",")[0] + } else { + // Use field name if no tags + fieldName = strings.ToLower(field.Name) + } + + if fieldName == "" || fieldName == "-" { + 
continue + } + + // Determine if this is a nested type that needs further validation + fieldType := field.Type + + // Dereference pointers + if fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + } + + isNested := isNestedType(fieldType) + + fields[fieldName] = fieldInfo{ + fieldType: fieldType, + isNested: isNested, + } + } + + return fields +} + +func isNestedType(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() == reflect.Struct && + !isBuiltinType(t) { + return true + } + + // Check if it's a slice of structs + if t.Kind() == reflect.Slice { + elemType := t.Elem() + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + if elemType.Kind() == reflect.Struct && !isBuiltinType(elemType) { + return true + } + } + + if t.Kind() == reflect.Map { + return isNestedType(t.Elem()) + } + + return false +} + +// isBuiltinType checks if a type is a builtin that shouldn't be recursively validated +func isBuiltinType(t reflect.Type) bool { + pkg := t.PkgPath() + return (pkg == "" || strings.HasPrefix(pkg, "time")) +} + +// validateNestedSection validates keys within a nested section +func validateNestedSection(sectionName string, value any, expectedType reflect.Type, result *StrictValidateResult) { + if valueMap, ok := value.(map[string]any); ok { + // Dereference pointer types + if expectedType.Kind() == reflect.Ptr { + expectedType = expectedType.Elem() + } + + // For regular structs, validate against struct fields + if expectedType.Kind() == reflect.Struct { + validateStructKeys(sectionName, valueMap, expectedType, result) + return + + } + + // For maps, validate each key if it's a nested type + if expectedType.Kind() == reflect.Map && isNestedType(expectedType.Elem()) { + subType := expectedType.Elem() + if subType.Kind() == reflect.Ptr { + subType = subType.Elem() + } + + for key, value := range valueMap { + section := fmt.Sprintf("%s.%s", sectionName, key) + validateNestedSection(section, value, subType, result) + } + return + } + } + + // For slices, validate each element if it's a nested type + if valueSlice, ok := value.([]any); ok && expectedType.Kind() == reflect.Slice { + elemType := expectedType.Elem() + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + + if isNestedType(elemType) { + for i, elem := range valueSlice { + section := fmt.Sprintf("%s[%d]", sectionName, i) + validateNestedSection(section, elem, elemType, result) + } + return + } + } +} + +// validateStructKeys validates that all keys in a map are recognized fields in the struct +func validateStructKeys(sectionPath string, data map[string]any, structType reflect.Type, result *StrictValidateResult) { + recognizedFields := getFields(structType) + + // Check for inline embedded structs + inlineFields := getInlineFields(structType) + + for key, value := range data { + // First check regular fields + fieldInfo, recognized := recognizedFields[key] + + // If not found in regular fields, check inline embedded fields + if !recognized { + for _, inlineType := range inlineFields { + inlineRecognized := getFields(inlineType) + if _, ok := inlineRecognized[key]; ok { + recognized = true + break + } + } + } + + if !recognized { + if result.UnrecognizedKeys[sectionPath] == nil { + result.UnrecognizedKeys[sectionPath] = []string{} + } + result.UnrecognizedKeys[sectionPath] = append(result.UnrecognizedKeys[sectionPath], key) + continue + } + + // If this field is also nested, validate it recursively + if recognized && fieldInfo.isNested && value != nil 
{ + nestedPath := fmt.Sprintf("%s.%s", sectionPath, key) + validateNestedSection(nestedPath, value, fieldInfo.fieldType, result) + } + } +} + +// getInlineFields finds all fields with inline tags +func getInlineFields(t reflect.Type) []reflect.Type { + var inlineTypes []reflect.Type + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + // Check if field has inline tag + tomlTag := field.Tag.Get("toml") + jsonTag := field.Tag.Get("json") + + if strings.Contains(tomlTag, "inline") || strings.Contains(jsonTag, "inline") { + fieldType := field.Type + if fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + } + inlineTypes = append(inlineTypes, fieldType) + } + } + + return inlineTypes +} + +// FormatStrictValidationErrors formats the strict validation results as a user-friendly string +func FormatStrictValidationErrors(result *StrictValidateResult) string { + if len(result.UnrecognizedSections) == 0 && len(result.UnrecognizedKeys) == 0 { + return "" + } + + var parts []string + + scheme := io.System().ColorScheme() + + if len(result.UnrecognizedSections) > 0 { + for _, section := range result.UnrecognizedSections { + parts = append(parts, fmt.Sprintf(" - %s", scheme.Red(section))) + } + } + + if len(result.UnrecognizedKeys) > 0 { + for section, keys := range result.UnrecognizedKeys { + for _, key := range keys { + parts = append(parts, fmt.Sprintf(" - %s.%s", section, scheme.Red(key))) + } + } + } + + return strings.Join(parts, "\n") +} diff --git a/internal/appconfig/strict_validate_test.go b/internal/appconfig/strict_validate_test.go new file mode 100644 index 0000000000..fe2a75ded9 --- /dev/null +++ b/internal/appconfig/strict_validate_test.go @@ -0,0 +1,197 @@ +package appconfig + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStrictValidate(t *testing.T) { + tests := []struct { + name string + config string + wantUnrecognizedSections []string + wantUnrecognizedKeys map[string][]string + }{ + { + name: "valid config", + config: ` + app = "test-app" + primary_region = "iad" + + [build] + builder = "dockerfile" + + [env] + NODE_ENV = "production" + `, + wantUnrecognizedSections: nil, + wantUnrecognizedKeys: nil, + }, + { + name: "unrecognized top-level section", + config: ` + app = "test-app" + + [unknown_section] + key = "value" + `, + wantUnrecognizedSections: []string{"unknown_section"}, + wantUnrecognizedKeys: nil, + }, + { + name: "unrecognized key in build section", + config: ` + app = "test-app" + + [build] + builder = "dockerfile" + unknown_key = "value" + `, + wantUnrecognizedSections: nil, + wantUnrecognizedKeys: map[string][]string{"build": {"unknown_key"}}, + }, + { + name: "unrecognized key in checks value", + config: ` + app = "test-app" + + [checks.health_check] + type = "http" + invalid_key = "value" + `, + wantUnrecognizedSections: nil, + wantUnrecognizedKeys: map[string][]string{"checks.health_check": {"invalid_key"}}, + }, + { + name: "real-world example", + config: ` + app = "bla" + primary_region = "mia" + console_command = "bin/rails console" + + [build] + dockerfile = "Dockerfile.web" + build-target = "deploy" + + [build.args] + APP_URL = "https://staging.floridacims.org" + RAILS_ENV = "staging" + RACK_ENV = "staging" + APPUID = "1000" + APPGID = "1000" + + [deploy] + processes = ["app"] + release_command = "./bin/rails db:prepare" + strategy = "bluegreen" + + [env] + RAILS_MAX_THREADS = 5 + + [http_service] + processes = ["app"] + internal_port = 3000 + auto_stop_machines = "suspend" + auto_start_machines 
= true + min_machines_running = 1 + + [[http_service.checks]] + processes = ['app'] + grace_period = "10s" + interval = "30s" + protocol = "http" + method = "GET" + timeout = "5s" + path = "/up" + + [[http_machine.checks]] + processes = ['app'] + grace_period = "30s" + image = "curlimages/curl" + entrypoint = ["/bin/sh", "-c"] + command = ["curl http://[$FLY_TEST_MACHINE_IP]/up | grep 'background-color: green'"] + kill_signal = "SIGKILL" + kill_timeout = "5s" + + [[http_service.machine_checks]] + processes = ['app'] + grace_period = "30s" + image = "curlimages/curl" + entrypoint = ["/bin/sh", "-c"] + command = ["curl http://[$FLY_TEST_MACHINE_IP]/up | grep 'background-color: green'"] + kill_signal = "SIGKILL" + kill_timeout = "5s" + + [http_service.concurrency] + processes = ['app'] + type = "requests" + soft_limit = 50 + hard_limit = 70 + + [http_service.http_options] + h2_backend = true + xyz = "123" + + [[vm]] + processes = ["app"] + size = "shared-cpu-2x" + memory = '2gb' + + [[vm]] + processes = ["worker"] + size = "shared-cpu-2x" + memory = '2gb' + + [[statics]] + guest_path = "/rails/public" + url_prefix = "/" + + [processes] + app = "bundle exec rails s -b 0.0.0.0 -p 3000" + worker = "bundle exec sidekiq" + + [checks.my_check_bla] + type = "http" + grace_period = "30s" + invalid_key = 123 + `, + wantUnrecognizedSections: []string{"http_machine"}, + wantUnrecognizedKeys: map[string][]string{ + "http_service.checks[0]": {"processes"}, + "checks.my_check_bla": {"invalid_key"}, + "deploy": {"processes"}, + "http_service.machine_checks[0]": {"grace_period", "processes"}, + "http_service.concurrency": {"processes"}, + "http_service.http_options": {"xyz"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f, err := os.CreateTemp("", "fly-*.toml") + assert.NoError(t, err) + defer os.Remove(f.Name()) + + _, err = f.WriteString(tt.config) + assert.NoError(t, err) + + rawConfig, err := LoadConfigAsMap(f.Name()) + assert.NoError(t, err) + + result := StrictValidate(rawConfig) + + assert.ElementsMatch(t, result.UnrecognizedSections, tt.wantUnrecognizedSections) + + assert.Len(t, result.UnrecognizedKeys, len(tt.wantUnrecognizedKeys)) + for section, keys := range tt.wantUnrecognizedKeys { + gotKeys, ok := result.UnrecognizedKeys[section] + assert.True(t, ok) + + assert.ElementsMatch(t, gotKeys, keys) + } + }) + } +} diff --git a/internal/appconfig/testdata/compose-autodetect.toml b/internal/appconfig/testdata/compose-autodetect.toml new file mode 100644 index 0000000000..53b526a795 --- /dev/null +++ b/internal/appconfig/testdata/compose-autodetect.toml @@ -0,0 +1,4 @@ +app = "test-app" + +[build.compose] +# No file specified - should auto-detect diff --git a/internal/appconfig/testdata/compose.toml b/internal/appconfig/testdata/compose.toml new file mode 100644 index 0000000000..b4f3e1c989 --- /dev/null +++ b/internal/appconfig/testdata/compose.toml @@ -0,0 +1,4 @@ +app = "test-app" + +[build] +compose.file = "docker-compose.yml" diff --git a/internal/appconfig/testdata/full-reference.toml b/internal/appconfig/testdata/full-reference.toml index b81d6a0861..e2b3c27b1f 100644 --- a/internal/appconfig/testdata/full-reference.toml +++ b/internal/appconfig/testdata/full-reference.toml @@ -128,6 +128,7 @@ host_dedication_id = "06031957" initial_size = "30gb" destination = "/data" snapshot_retention = 17 + scheduled_snapshots = true [[vm]] size = "shared-cpu-1x" diff --git a/internal/appconfig/validation.go b/internal/appconfig/validation.go index 
e41510e1d3..0b659db316 100644 --- a/internal/appconfig/validation.go +++ b/internal/appconfig/validation.go @@ -13,6 +13,7 @@ import ( "github.com/logrusorgru/aurora" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/flag/validation" "github.com/superfly/flyctl/internal/sentry" ) @@ -21,24 +22,25 @@ var ( MachinesDeployStrategies = []string{"canary", "rolling", "immediate", "bluegreen"} ) -func (cfg *Config) Validate(ctx context.Context) (err error, extra_info string) { - if cfg == nil { +func (c *Config) Validate(ctx context.Context) (err error, extra_info string) { + if c == nil { return errors.New("App config file not found"), "" } validators := []func() (string, error){ - cfg.validateBuildStrategies, - cfg.validateDeploySection, - cfg.validateChecksSection, - cfg.validateServicesSection, - cfg.validateProcessesSection, - cfg.validateMachineConversion, - cfg.validateConsoleCommand, - cfg.validateMounts, - cfg.validateRestartPolicy, + c.validateBuildStrategies, + c.validateDeploySection, + c.validateChecksSection, + c.validateServicesSection, + c.validateProcessesSection, + c.validateMachineConversion, + c.validateConsoleCommand, + c.validateMounts, + c.validateRestartPolicy, + c.validateCompression, } - extra_info = fmt.Sprintf("Validating %s\n", cfg.ConfigFilePath()) + extra_info = fmt.Sprintf("Validating %s\n", c.ConfigFilePath()) for _, vFunc := range validators { info, vErr := vFunc() @@ -48,8 +50,8 @@ func (cfg *Config) Validate(ctx context.Context) (err error, extra_info string) } } - if cfg.v2UnmarshalError != nil { - err = cfg.v2UnmarshalError + if c.v2UnmarshalError != nil { + err = c.v2UnmarshalError } if err != nil { @@ -61,13 +63,13 @@ func (cfg *Config) Validate(ctx context.Context) (err error, extra_info string) return nil, extra_info } -func (cfg *Config) ValidateGroups(ctx context.Context, groups []string) (err error, extraInfo string) { +func (c *Config) ValidateGroups(ctx context.Context, groups []string) (err error, extraInfo string) { if len(groups) == 0 { - return cfg.Validate(ctx) + return c.Validate(ctx) } var config *Config for _, group := range groups { - config, err = cfg.Flatten(group) + config, err = c.Flatten(group) if err != nil { return } @@ -79,8 +81,8 @@ func (cfg *Config) ValidateGroups(ctx context.Context, groups []string) (err err return } -func (cfg *Config) validateBuildStrategies() (extraInfo string, err error) { - buildStrats := cfg.BuildStrategies() +func (c *Config) validateBuildStrategies() (extraInfo string, err error) { + buildStrats := c.BuildStrategies() if len(buildStrats) > 1 { // TODO: validate that most users are not affected by this and/or fixing this, then make it fail validation msg := fmt.Sprintf("%s more than one build configuration found: [%s]", aurora.Yellow("WARN"), strings.Join(buildStrats, ", ")) @@ -90,17 +92,17 @@ func (cfg *Config) validateBuildStrategies() (extraInfo string, err error) { return } -func (cfg *Config) validateDeploySection() (extraInfo string, err error) { - if cfg.Deploy == nil { +func (c *Config) validateDeploySection() (extraInfo string, err error) { + if c.Deploy == nil { return } - if _, vErr := shlex.Split(cfg.Deploy.ReleaseCommand); vErr != nil { - extraInfo += fmt.Sprintf("Can't shell split release command: '%s'\n", cfg.Deploy.ReleaseCommand) + if _, vErr := shlex.Split(c.Deploy.ReleaseCommand); vErr != nil { + extraInfo += fmt.Sprintf("Can't shell split release command: '%s'\n", c.Deploy.ReleaseCommand) err = ValidationError } - if s := 
cfg.Deploy.Strategy; s != "" { + if s := c.Deploy.Strategy; s != "" { if !slices.Contains(MachinesDeployStrategies, s) { extraInfo += fmt.Sprintf( "unsupported deployment strategy '%s'; Apps v2 supports the following strategies: %s", s, @@ -109,7 +111,7 @@ func (cfg *Config) validateDeploySection() (extraInfo string, err error) { err = ValidationError } - if s == "canary" && len(cfg.Mounts) > 0 { + if s == "canary" && len(c.Mounts) > 0 { extraInfo += "error canary deployment strategy is not supported when using mounted volumes" err = ValidationError } @@ -118,8 +120,8 @@ func (cfg *Config) validateDeploySection() (extraInfo string, err error) { return } -func (cfg *Config) validateChecksSection() (extraInfo string, err error) { - for name, check := range cfg.Checks { +func (c *Config) validateChecksSection() (extraInfo string, err error) { + for name, check := range c.Checks { if _, vErr := check.toMachineCheck(); vErr != nil { extraInfo += fmt.Sprintf("Can't process top level check '%s': %s\n", name, vErr) err = ValidationError @@ -140,13 +142,13 @@ func (cfg *Config) validateChecksSection() (extraInfo string, err error) { return } -func (cfg *Config) validateServicesSection() (extraInfo string, err error) { - validGroupNames := cfg.ProcessNames() +func (c *Config) validateServicesSection() (extraInfo string, err error) { + validGroupNames := c.ProcessNames() // The following is different than len(validGroupNames) because // it can be zero when there is no [processes] section - processCount := len(cfg.Processes) + processCount := len(c.Processes) - for _, service := range cfg.AllServices() { + for _, service := range c.AllServices() { switch { case len(service.Processes) == 0 && processCount > 0: extraInfo += fmt.Sprintf( @@ -175,7 +177,7 @@ func (cfg *Config) validateServicesSection() (extraInfo string, err error) { "Check docs at https://fly.io/docs/reference/configuration/#services-ports \n " + "Validation for _services without ports_ will hard fail after February 15, 2024.", ) - //err = ValidationError + // err = ValidationError } for _, check := range service.TCPChecks { @@ -219,8 +221,8 @@ func validateSingleServiceCheckDuration(d *fly.Duration, zeroOK bool, proto, des return } -func (cfg *Config) validateProcessesSection() (extraInfo string, err error) { - for processName, cmdStr := range cfg.Processes { +func (c *Config) validateProcessesSection() (extraInfo string, err error) { + for processName, cmdStr := range c.Processes { if cmdStr == "" { continue } @@ -238,9 +240,9 @@ func (cfg *Config) validateProcessesSection() (extraInfo string, err error) { return extraInfo, err } -func (cfg *Config) validateMachineConversion() (extraInfo string, err error) { - for _, name := range cfg.ProcessNames() { - if _, vErr := cfg.ToMachineConfig(name, nil); err != nil { +func (c *Config) validateMachineConversion() (extraInfo string, err error) { + for _, name := range c.ProcessNames() { + if _, vErr := c.ToMachineConfig(name, nil); err != nil { extraInfo += fmt.Sprintf("Converting to machine in process group '%s' will fail because of: %s", name, vErr) err = ValidationError } @@ -248,21 +250,21 @@ func (cfg *Config) validateMachineConversion() (extraInfo string, err error) { return } -func (cfg *Config) validateConsoleCommand() (extraInfo string, err error) { - if _, vErr := shlex.Split(cfg.ConsoleCommand); vErr != nil { - extraInfo += fmt.Sprintf("Can't shell split console command: '%s'\n", cfg.ConsoleCommand) +func (c *Config) validateConsoleCommand() (extraInfo string, err error) { + if _, 
vErr := shlex.Split(c.ConsoleCommand); vErr != nil {
+		extraInfo += fmt.Sprintf("Can't shell split console command: '%s'\n", c.ConsoleCommand)
 		err = ValidationError
 	}
 	return
 }
 
-func (cfg *Config) validateMounts() (extraInfo string, err error) {
-	if cfg.configFilePath == "--flatten--" && len(cfg.Mounts) > 1 {
-		extraInfo += fmt.Sprintf("group '%s' has more than one [[mounts]] section defined\n", cfg.defaultGroupName)
+func (c *Config) validateMounts() (extraInfo string, err error) {
+	if c.configFilePath == "--flatten--" && len(c.Mounts) > 1 {
+		extraInfo += fmt.Sprintf("group '%s' has more than one [[mounts]] section defined\n", c.defaultGroupName)
 		err = ValidationError
 	}
 
-	for _, m := range cfg.Mounts {
+	for _, m := range c.Mounts {
 		if m.InitialSize != "" {
 			v, vErr := helpers.ParseSize(m.InitialSize, units.FromHumanSize, units.GB)
 			switch {
@@ -327,13 +329,13 @@ func (cfg *Config) validateMounts() (extraInfo string, err error) {
 	return
 }
 
-func (cfg *Config) validateRestartPolicy() (extraInfo string, err error) {
-	if cfg.Restart == nil {
+func (c *Config) validateRestartPolicy() (extraInfo string, err error) {
+	if c.Restart == nil {
 		return
 	}
 
-	for _, restart := range cfg.Restart {
-		validGroupNames := cfg.ProcessNames()
+	for _, restart := range c.Restart {
+		validGroupNames := c.ProcessNames()
 
 		// first make sure restart.Processes matches a valid process name.
 		for _, processName := range restart.Processes {
@@ -355,3 +357,23 @@ func (cfg *Config) validateRestartPolicy() (extraInfo string, err error) {
 	return
 }
+
+func (c *Config) validateCompression() (extraInfo string, err error) {
+	if c.Build != nil {
+		if c.Build.Compression != "" {
+			if vErr := validation.ValidateCompressionFlag(c.Build.Compression); vErr != nil {
+				extraInfo += fmt.Sprintf("%s\n", vErr.Error())
+				err = ValidationError
+			}
+		}
+
+		if c.Build.CompressionLevel != nil {
+			if vErr := validation.ValidateCompressionLevelFlag(*c.Build.CompressionLevel); vErr != nil {
+				extraInfo += fmt.Sprintf("%s\n", vErr.Error())
+				err = ValidationError
+			}
+		}
+	}
+
+	return
+}
diff --git a/internal/appsecrets/minvers.go b/internal/appsecrets/minvers.go
new file mode 100644
index 0000000000..3bd117c7c9
--- /dev/null
+++ b/internal/appsecrets/minvers.go
@@ -0,0 +1,64 @@
+package appsecrets
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+
+	"github.com/superfly/flyctl/flyctl"
+	"github.com/superfly/flyctl/internal/config"
+	"github.com/superfly/flyctl/internal/state"
+)
+
+func getMinvers() (config.AppSecretsMinvers, error) {
+	minvers := config.AppSecretsMinvers{}
+	if err := viper.UnmarshalKey(flyctl.ConfigAppSecretsMinvers, &minvers); err != nil {
+		return nil, errors.Wrap(err, "invalid application secrets minversions")
+	}
+	return minvers, nil
+}
+
+// GetMinvers returns the minimum secrets version for appName if known, or nil.
+func GetMinvers(appName string) (*uint64, error) {
+	minvers, err := getMinvers()
+	if err != nil {
+		return nil, err
+	}
+
+	if v, ok := minvers[appName]; ok {
+		return &v, nil
+	}
+	return nil, nil
+}
+
+func setMinvers(ctx context.Context, appName string, v *uint64) error {
+	minvers, err := getMinvers()
+	if err != nil {
+		return err
+	}
+
+	if v == nil {
+		delete(minvers, appName)
+	} else {
+		minvers[appName] = *v
+	}
+
+	viper.Set(flyctl.ConfigAppSecretsMinvers, minvers)
+	configPath := state.ConfigFile(ctx)
+	if err := config.SetAppSecretsMinvers(configPath, minvers); err != nil {
+		return errors.Wrap(err, "error saving config file")
+	}
+
+	return nil
+}
+
+// SetMinvers sets the minimum secrets version for appName and saves it.
+func SetMinvers(ctx context.Context, appName string, v uint64) error {
+	return setMinvers(ctx, appName, &v)
+}
+
+// DeleteMinvers removes the minimum secrets version for appName and saves it.
+func DeleteMinvers(ctx context.Context, appName string) error {
+	return setMinvers(ctx, appName, nil)
+}
diff --git a/internal/appsecrets/secrets.go b/internal/appsecrets/secrets.go
new file mode 100644
index 0000000000..5e052fcf10
--- /dev/null
+++ b/internal/appsecrets/secrets.go
@@ -0,0 +1,62 @@
+package appsecrets
+
+import (
+	"context"
+	crand "crypto/rand"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/superfly/fly-go"
+
+	"github.com/superfly/flyctl/internal/flapsutil"
+)
+
+// List returns a list of app secrets. client must be a flaps client for appName.
+// List will use the best known minvers for appName when listing secrets.
+func List(ctx context.Context, client flapsutil.FlapsClient, appName string) ([]fly.AppSecret, error) {
+	minver, err := GetMinvers(appName)
+	if err != nil {
+		return nil, err
+	}
+	return client.ListAppSecrets(ctx, minver, false)
+}
+
+// Update sets setSecrets and unsets unsetSecrets. client must be a flaps client for appName.
+// It is not an error to unset a secret that does not exist.
+// Update will keep track of the secrets minvers for appName after successfully changing secrets.
+func Update(ctx context.Context, client flapsutil.FlapsClient, appName string, setSecrets map[string]string, unsetSecrets []string) error {
+	update := map[string]*string{}
+	for name, value := range setSecrets {
+		value := value
+		update[name] = &value
+	}
+	for _, name := range unsetSecrets {
+		update[name] = nil
+	}
+
+	if len(update) == 0 {
+		return nil
+	}
+
+	resp, err := client.UpdateAppSecrets(ctx, update)
+	if err != nil {
+		return err
+	}
+
+	if err := SetMinvers(ctx, appName, resp.Version); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Sync sets the min version for the app to the current min version, allowing
+// any previously set secret to be visible in deploys.
+func Sync(ctx context.Context, client flapsutil.FlapsClient, appName string) error {
+	// This is somewhat of a hack -- we unset a non-existent secret and
+	// we get back the latest min version after the unset.
+ rand := make([]byte, 8) + _, _ = crand.Read(rand) + bogusDummySecret := fmt.Sprintf("BogusDummySecret_%s", hex.EncodeToString(rand)) + unsetSecrets := []string{bogusDummySecret} + return Update(ctx, client, appName, nil, unsetSecrets) +} diff --git a/internal/build/imgsrc/buildkit.go b/internal/build/imgsrc/buildkit.go index e9e8e5becf..28d31e8db2 100644 --- a/internal/build/imgsrc/buildkit.go +++ b/internal/build/imgsrc/buildkit.go @@ -8,8 +8,10 @@ import ( "github.com/docker/docker/api/types" dockerclient "github.com/docker/docker/client" + "github.com/moby/buildkit/client" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/auth" + "github.com/moby/buildkit/util/progress/progressui" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -31,14 +33,14 @@ func buildkitEnabled(docker *dockerclient.Client) (buildkitEnabled bool, err err return buildkitEnabled, nil } -func newBuildkitAuthProvider(token string) session.Attachable { +func newBuildkitAuthProvider(tokenGetter func() string) session.Attachable { return &buildkitAuthProvider{ - token: token, + tokenGetter: tokenGetter, } } type buildkitAuthProvider struct { - token string + tokenGetter func() string } func (ap *buildkitAuthProvider) Register(server *grpc.Server) { @@ -46,7 +48,11 @@ func (ap *buildkitAuthProvider) Register(server *grpc.Server) { } func (ap *buildkitAuthProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) { - auths := authConfigs(ap.token) + token := "" + if ap.tokenGetter != nil { + token = ap.tokenGetter() + } + auths := authConfigs(token) res := &auth.CredentialsResponse{} if a, ok := auths[req.Host]; ok { res.Username = a.Username @@ -67,3 +73,21 @@ func (ap *buildkitAuthProvider) GetTokenAuthority(ctx context.Context, req *auth func (ap *buildkitAuthProvider) VerifyTokenAuthority(ctx context.Context, req *auth.VerifyTokenAuthorityRequest) (*auth.VerifyTokenAuthorityResponse, error) { return nil, status.Errorf(codes.Unavailable, "client side tokens disabled") } + +func newDisplay(statusCh chan *client.SolveStatus) func() error { + return func() error { + display, err := progressui.NewDisplay(os.Stderr, progressui.DisplayMode(os.Getenv("BUILDKIT_PROGRESS"))) + if err != nil { + return err + } + + // UpdateFrom must not use the incoming context. + // Cancelling this context kills the reader of statusCh which blocks buildkit.Client's Solve() indefinitely. + // Solve() closes statusCh at the end and UpdateFrom returns by reading the closed channel. + // + // See https://github.com/superfly/flyctl/pull/2682 for the context. 
+		_, err = display.UpdateFrom(context.Background(), statusCh)
+		return err
+
+	}
+}
diff --git a/internal/build/imgsrc/buildkit_builder.go b/internal/build/imgsrc/buildkit_builder.go
new file mode 100644
index 0000000000..823b360fd7
--- /dev/null
+++ b/internal/build/imgsrc/buildkit_builder.go
@@ -0,0 +1,200 @@
+package imgsrc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containerd/containerd/api/services/content/v1"
+	"github.com/moby/buildkit/client"
+	"github.com/superfly/fly-go"
+	"github.com/superfly/flyctl/agent"
+	"github.com/superfly/flyctl/helpers"
+	"github.com/superfly/flyctl/internal/cmdfmt"
+	"github.com/superfly/flyctl/internal/flag"
+	"github.com/superfly/flyctl/internal/flyutil"
+	"github.com/superfly/flyctl/internal/tracing"
+	"github.com/superfly/flyctl/iostreams"
+	"github.com/superfly/flyctl/terminal"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var _ imageBuilder = (*BuildkitBuilder)(nil)
+
+type BuildkitBuilder struct {
+	// addr is the address of the Buildkit daemon.
+	// The client may need a WireGuard connection to reach the address.
+	addr string
+
+	// provisioner is used to provision a builder machine if needed.
+	provisioner *Provisioner
+}
+
+// NewBuildkitBuilder creates a builder that directly uses Buildkit instead of Docker Engine.
+// addr is the address of the daemon (e.g. "foobar.flycast:1234"); it is optional.
+func NewBuildkitBuilder(addr string, provisioner *Provisioner) *BuildkitBuilder {
+	if !provisioner.UseBuildkit() {
+		panic("provisioner must be configured to use Buildkit")
+	}
+
+	return &BuildkitBuilder{addr: addr, provisioner: provisioner}
+}
+
+func (r *BuildkitBuilder) Name() string { return "Buildkit" }
+
+func (r *BuildkitBuilder) Run(ctx context.Context, _ *dockerClientFactory, streams *iostreams.IOStreams, opts ImageOptions, build *build) (*DeploymentImage, string, error) {
+	ctx, span := tracing.GetTracer().Start(ctx, "buildkit_builder", trace.WithAttributes(opts.ToSpanAttributes()...))
+	defer span.End()
+
+	build.BuildStart()
+	defer build.BuildFinish()
+
+	var dockerfile string
+
+	switch {
+	case opts.DockerfilePath != "" && !helpers.FileExists(opts.DockerfilePath):
+		return nil, "", fmt.Errorf("dockerfile '%s' not found", opts.DockerfilePath)
+	case opts.DockerfilePath != "":
+		dockerfile = opts.DockerfilePath
+	default:
+		dockerfile = ResolveDockerfile(opts.WorkingDir)
+	}
+
+	if dockerfile == "" {
+		terminal.Debug("dockerfile not found, skipping")
+		return nil, "", nil
+	}
+
+	build.ImageBuildStart()
+	defer build.ImageBuildFinish()
+
+	image, err := r.buildWithBuildkit(ctx, streams, opts, dockerfile, build)
+	if err != nil {
+		return nil, "", err
+	}
+	build.BuilderMeta.RemoteMachineId = image.BuilderID
+	cmdfmt.PrintDone(streams.ErrOut, "Building image done")
+	span.SetAttributes(image.ToSpanAttributes()...)
+ return image, "", nil +} + +func (r *BuildkitBuilder) buildWithBuildkit(ctx context.Context, streams *iostreams.IOStreams, opts ImageOptions, dockerfilePath string, buildState *build) (i *DeploymentImage, err error) { + ctx, span := tracing.GetTracer().Start(ctx, "buildkit_build", trace.WithAttributes(opts.ToSpanAttributes()...)) + defer func() { + if err != nil { + span.RecordError(err) + } + streams.StopProgressIndicator() + span.End() + }() + + app := r.provisioner.org.RemoteBuilderApp + if r.addr == "" && app != nil { + r.addr = fmt.Sprintf("%s.flycast:%d", app.Name, buildkitGRPCPort) + } + + buildState.BuilderInitStart() + defer buildState.BuilderInitFinish() + buildState.SetBuilderMetaPart1("buildkit", r.addr, "") + + streams.StartProgressIndicator() + + buildkitClient, err := r.connectClient(ctx, appToAppCompact(app), opts.AppName) + if err != nil { + return nil, fmt.Errorf("failed to create buildkit client: %w", err) + } + + streams.StopProgressIndicator() + cmdfmt.PrintDone(streams.ErrOut, fmt.Sprintf("Connected to buildkit daemon at %s", r.addr)) + + buildState.BuildAndPushStart() + defer buildState.BuildAndPushFinish() + + res, err := buildImage(ctx, buildkitClient, opts, dockerfilePath) + if err != nil { + return nil, err + } + + return newDeploymentImage(ctx, buildkitClient, res, opts.Tag) +} + +func (r *BuildkitBuilder) connectClient(ctx context.Context, app *fly.AppCompact, appName string) (*client.Client, error) { + recreateBuilder := flag.GetRecreateBuilder(ctx) + ensureBuilder := false + if r.addr == "" || recreateBuilder { + updateProgress(ctx, "Updating remote builder...") + _, builderApp, err := r.provisioner.EnsureBuilder( + ctx, os.Getenv("FLY_REMOTE_BUILDER_REGION"), recreateBuilder, + ) + if err != nil { + return nil, err + } + app = appToAppCompact(builderApp) + r.addr = fmt.Sprintf("%s.flycast:%d", app.Name, buildkitGRPCPort) + ensureBuilder = true + } + var opts []client.ClientOpt + apiClient := flyutil.ClientFromContext(ctx) + if app != nil { + _, dialer, err := agent.BringUpAgent(ctx, apiClient, app, app.Network, true) + if err != nil { + return nil, fmt.Errorf("failed wireguard connection: %w", err) + } + opts = append(opts, client.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return dialer.DialContext(ctx, "tcp", addr) + })) + } + + updateProgress(ctx, "Connecting to buildkit daemon at %s...", r.addr) + buildkitClient, err := client.New(ctx, r.addr, opts...) + if err != nil { + return nil, fmt.Errorf("failed to create buildkit client: %w", err) + } + _, err = buildkitClient.Info(ctx) + if err != nil { + if app == nil { // Retry with Wireguard connection + app, err = apiClient.GetAppCompact(ctx, appName) + if err != nil { + return nil, fmt.Errorf("failed to get app: %w", err) + } + return r.connectClient(ctx, app, appName) + } else if !ensureBuilder && r.provisioner.buildkitImage != "" { // Retry with ensureBuilder + r.addr = "" + return r.connectClient(ctx, nil, appName) + } else { + return nil, fmt.Errorf("failed to connect to buildkit: %w", err) + } + } + return buildkitClient, nil +} + +func updateProgress(ctx context.Context, msg string, a ...any) { + msg = fmt.Sprintf(msg+"\n", a...) 
+	streams := iostreams.FromContext(ctx)
+	if streams.IsInteractive() {
+		streams.ChangeProgressIndicatorMsg(msg)
+	} else {
+		fmt.Fprintln(streams.ErrOut, msg)
+	}
+}
+
+func readContent(ctx context.Context, contentClient content.ContentClient, desc *Descriptor) (string, error) {
+	readClient, err := contentClient.Read(ctx, &content.ReadContentRequest{Digest: desc.Digest})
+	if err != nil {
+		return "", fmt.Errorf("failed to create read stream: %w", err)
+	}
+	var data []byte
+	for {
+		resp, err := readClient.Recv()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return "", fmt.Errorf("failed to read from stream: %w", err)
+		}
+		data = append(data, resp.Data...)
+	}
+	return string(data), nil
+}
diff --git a/internal/build/imgsrc/depot.go b/internal/build/imgsrc/depot.go
index 23ecb077f4..2ac61da13b 100644
--- a/internal/build/imgsrc/depot.go
+++ b/internal/build/imgsrc/depot.go
@@ -8,13 +8,14 @@ import (
 	"log"
 	"os"
 	"path/filepath"
+	"strconv"
 	"time"
 
 	depotbuild "github.com/depot/depot-go/build"
 	depotmachine "github.com/depot/depot-go/machine"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/session/secrets/secretsprovider"
-	"github.com/moby/buildkit/util/progress/progressui"
+	"github.com/moby/buildkit/worker/label"
 	"github.com/pkg/errors"
 	"github.com/superfly/fly-go"
 	"github.com/superfly/flyctl/helpers"
@@ -25,7 +26,6 @@ import (
 	"github.com/superfly/flyctl/internal/render"
 	"github.com/superfly/flyctl/internal/tracing"
 	"github.com/superfly/flyctl/iostreams"
-	"github.com/superfly/flyctl/retry"
 	"github.com/superfly/flyctl/terminal"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
@@ -110,7 +110,7 @@ func (d *DepotBuilder) Run(ctx context.Context, _ *dockerClientFactory, streams
 		tracing.RecordError(span, err, "failed to build image")
 		return nil, "", errors.Wrap(err, "error building")
 	}
-
+	build.BuilderMeta.RemoteMachineId = image.BuilderID
 	build.ImageBuildFinish()
 	build.BuildFinish()
 	cmdfmt.PrintDone(streams.ErrOut, "Building image done")
@@ -119,9 +119,15 @@ func (d *DepotBuilder) Run(ctx context.Context, _ *dockerClientFactory, streams
 	return image, "", nil
 }
 
-func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOptions, dockerfilePath string, buildState *build, scope depotBuilderScope) (*DeploymentImage, error) {
+func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOptions, dockerfilePath string, buildState *build, scope depotBuilderScope) (i *DeploymentImage, retErr error) {
 	ctx, span := tracing.GetTracer().Start(ctx, "depot_build", trace.WithAttributes(opts.ToSpanAttributes()...))
-	defer span.End()
+	defer func() {
+		if retErr != nil {
+			streams.StopProgressIndicator()
+			span.RecordError(retErr)
+		}
+		span.End()
+	}()
 
 	buildState.BuilderInitStart()
 	buildState.SetBuilderMetaPart1(depotBuilderType, "", "")
@@ -135,10 +141,13 @@ func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOpt
 		}
 	}
 
-	buildkit, build, buildErr := initBuilder(ctx, buildState, opts.AppName, streams, scope)
+	// Building a container image may take multiple minutes.
+	// So we can only have the provisioning part in this context.
+	provisionCtx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+	defer cancel()
+
+	buildkit, build, buildErr := initBuilder(provisionCtx, buildState, opts.AppName, streams, scope)
 	if buildErr != nil {
-		streams.StopProgressIndicator()
-		span.RecordError(buildErr)
 		return nil, buildErr
 	}
 	defer func() {
@@ -146,15 +155,10 @@ func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOpt
 		build.Finish(buildErr)
 	}()
 
-	connectCtx, cancelConnect := context.WithTimeout(ctx, 5*time.Minute)
-	defer cancelConnect()
-
 	span.AddEvent("connecting to buildkit")
 	var buildkitClient *client.Client
-	buildkitClient, buildErr = buildkit.Connect(connectCtx)
+	buildkitClient, buildErr = buildkit.Connect(provisionCtx)
 	if buildErr != nil {
-		streams.StopProgressIndicator()
-		span.RecordError(buildErr)
 		return nil, buildErr
 	}
 
@@ -168,7 +172,6 @@ func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOpt
 	res, buildErr := buildImage(ctx, buildkitClient, opts, dockerfilePath)
 	if buildErr != nil {
 		buildState.BuildAndPushFinish()
-		span.RecordError(buildErr)
 		return nil, buildErr
 	}
 	buildState.BuildAndPushFinish()
@@ -176,14 +179,23 @@ func depotBuild(ctx context.Context, streams *iostreams.IOStreams, opts ImageOpt
 	link = streams.CreateLink("Build Summary: ", build.BuildURL)
 	tb.Done(link)
 
-	return newDeploymentImage(res, opts.Tag)
+	return newDeploymentImage(ctx, buildkitClient, res, opts.Tag)
 }
 
-func initBuilder(ctx context.Context, buildState *build, appName string, streams *iostreams.IOStreams, builderScope depotBuilderScope) (*depotmachine.Machine, *depotbuild.Build, error) {
+// initBuilder returns a Depot machine to build a container image.
+// Note that the caller is responsible for passing a context with a reasonable timeout.
+// Otherwise, the function could block indefinitely.
+func initBuilder(ctx context.Context, buildState *build, appName string, streams *iostreams.IOStreams, builderScope depotBuilderScope) (m *depotmachine.Machine, b *depotbuild.Build, retErr error) {
 	ctx, span := tracing.GetTracer().Start(ctx, "init_depot_build")
-	defer span.End()
-	defer buildState.BuilderInitFinish()
+	defer func() {
+		if retErr != nil {
+			streams.StopProgressIndicator()
+			span.RecordError(retErr)
+		}
+		buildState.BuilderInitFinish()
+		span.End()
+	}()
 
 	apiClient := flyutil.ClientFromContext(ctx)
 	region := os.Getenv("FLY_REMOTE_BUILDER_REGION")
@@ -198,7 +210,6 @@ func initBuilder(ctx context.Context, buildState *build, appName string, streams
 		BuilderScope: fly.StringPointer(builderScope.String()),
 	})
 	if err != nil {
-		streams.StopProgressIndicator()
 		return nil, nil, err
 	}
 
@@ -207,32 +218,14 @@ func initBuilder(ctx context.Context, buildState *build, appName string, streams
 		return nil, nil, err
 	}
 
-	// Set the buildErr to any error that represents the build failing.
- var buildErr error - var finalBuildErr error - span.AddEvent("Acquiring Depot machine") - var buildkit *depotmachine.Machine - - timeoutCtx, cancel := context.WithTimeout(ctx, 25*time.Second) - defer cancel() - - finalBuildErr = retry.Retry(timeoutCtx, func() error { - buildkit, buildErr = depotmachine.Acquire(ctx, build.ID, build.Token, "amd64") - if buildErr != nil { - span.RecordError(buildErr) - return buildErr - } - - return nil - }, 2) - if finalBuildErr != nil { - streams.StopProgressIndicator() - return nil, nil, finalBuildErr + machine, err := depotmachine.Acquire(ctx, build.ID, build.Token, "amd64") + if err != nil { + return nil, nil, err } - return buildkit, &build, err + return machine, &build, nil } func buildImage(ctx context.Context, buildkitClient *client.Client, opts ImageOptions, dockerfilePath string) (*client.SolveResponse, error) { @@ -256,11 +249,9 @@ func buildImage(ctx context.Context, buildkitClient *client.Client, opts ImageOp exportEntry.Attrs["push"] = "true" } - if opts.UseZstd { - exportEntry.Attrs["compression"] = "zstd" - exportEntry.Attrs["compression-level"] = "3" - exportEntry.Attrs["force-compression"] = "true" - } + exportEntry.Attrs["compression"] = opts.Compression + exportEntry.Attrs["compression-level"] = strconv.Itoa(opts.CompressionLevel) + exportEntry.Attrs["force-compression"] = "true" ch := make(chan *client.SolveStatus) eg, ctx := errgroup.WithContext(ctx) @@ -296,23 +287,16 @@ func buildImage(ctx context.Context, buildkitClient *client.Client, opts ImageOp } solverOptions.Session = append( solverOptions.Session, - newBuildkitAuthProvider(config.Tokens(ctx).Docker()), + newBuildkitAuthProvider(func() string { + return config.Tokens(ctx).Docker() + }), secretsprovider.FromMap(secrets), ) res, err = buildkitClient.Solve(ctx, nil, solverOptions, ch) return err }) - - eg.Go(func() error { - display, err := progressui.NewDisplay(os.Stderr, progressui.AutoMode) - if err != nil { - return err - } - - _, err = display.UpdateFrom(context.Background(), ch) - return err - }) + eg.Go(newDisplay(ch)) if err := eg.Wait(); err != nil { span.RecordError(err) @@ -322,7 +306,7 @@ func buildImage(ctx context.Context, buildkitClient *client.Client, opts ImageOp return res, nil } -func newDeploymentImage(res *client.SolveResponse, tag string) (*DeploymentImage, error) { +func newDeploymentImage(ctx context.Context, c *client.Client, res *client.SolveResponse, tag string) (*DeploymentImage, error) { id := res.ExporterResponse["containerimage.digest"] encoded := res.ExporterResponse["containerimage.descriptor"] output, err := base64.StdEncoding.DecodeString(encoded) @@ -336,10 +320,27 @@ func newDeploymentImage(res *client.SolveResponse, tag string) (*DeploymentImage return nil, err } + // Standard Buildkit doesn't attach manifest contents to the descriptor. 
+ if descriptor.Annotations.RawManifest == "" { + descriptor.Annotations.RawManifest, err = readContent(ctx, c.ContentClient(), descriptor) + if err != nil { + return nil, err + } + } + + var builderHostname string + workers, err := c.ListWorkers(ctx) + if err != nil { + return nil, err + } + for _, w := range workers { + builderHostname = w.Labels[label.Hostname] + } image := &DeploymentImage{ - ID: id, - Tag: tag, - Size: descriptor.Bytes(), + ID: id, + Tag: tag, + Size: descriptor.Bytes(), + BuilderID: builderHostname, } return image, nil diff --git a/internal/build/imgsrc/depot_test.go b/internal/build/imgsrc/depot_test.go new file mode 100644 index 0000000000..bfee9db1fe --- /dev/null +++ b/internal/build/imgsrc/depot_test.go @@ -0,0 +1,22 @@ +package imgsrc + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/iostreams" +) + +func TestInitBuilder(t *testing.T) { + ctx := context.Background() + ctx = flyutil.NewContextWithClient(ctx, flyutil.NewClientFromOptions(ctx, fly.ClientOptions{BaseURL: "invalid://localhost"})) + ios, _, _, _ := iostreams.Test() + build := newBuild("build1", false) + + // The invocation below doesn't test things much, but it may be better than nothing. + _, _, err := initBuilder(ctx, build, "app1", ios, DepotBuilderScopeOrganization) + require.ErrorContains(t, err, `unsupported protocol scheme "invalid"`) +} diff --git a/internal/build/imgsrc/docker.go b/internal/build/imgsrc/docker.go index e9e4f620c8..68825c95b3 100644 --- a/internal/build/imgsrc/docker.go +++ b/internal/build/imgsrc/docker.go @@ -29,6 +29,7 @@ import ( "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/flyctl" "github.com/superfly/flyctl/helpers" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flyerr" "github.com/superfly/flyctl/internal/flyutil" @@ -37,6 +38,7 @@ import ( "github.com/superfly/flyctl/internal/tracing" "github.com/superfly/flyctl/iostreams" "github.com/superfly/flyctl/terminal" + "github.com/superfly/macaroon/flyio/machinesapi" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -61,7 +63,37 @@ func newDockerClientFactory(daemonType DockerDaemonType, apiClient flyutil.Clien mode: daemonType, remote: true, buildFn: func(ctx context.Context, build *build) (*dockerclient.Client, error) { - return newRemoteDockerClient(ctx, apiClient, appName, streams, build, cachedDocker, connectOverWireguard, recreateBuilder) + cfg := config.FromContext(ctx) + var ( + builderMachine *fly.Machine + builderApp *fly.App + err error + ) + + managed := daemonType.UseManagedBuilder() + if cfg.DisableManagedBuilders { + managed = false + } + if managed { + connectOverWireguard = false + builderMachine, builderApp, err = remoteManagedBuilderMachine(ctx, apiClient, appName) + if err != nil { + return nil, err + } + } else { + var org *fly.Organization + org, err = apiClient.GetOrganizationByApp(ctx, appName) + if err != nil { + return nil, err + } + provisioner := NewProvisioner(org) + builderMachine, builderApp, err = provisioner.EnsureBuilder(ctx, os.Getenv("FLY_REMOTE_BUILDER_REGION"), recreateBuilder) + if err != nil { + return nil, err + } + } + + return newRemoteDockerClient(ctx, apiClient, appName, streams, build, cachedDocker, connectOverWireguard, builderApp, builderMachine) }, apiClient: apiClient, appName: appName, @@ -108,15 +140,15 @@ func 
newDockerClientFactory(daemonType DockerDaemonType, apiClient flyutil.Clien } } -func NewDockerDaemonType(allowLocal, allowRemote, prefersLocal, useDepot, useNixpacks bool) DockerDaemonType { +func NewDockerDaemonType(allowLocal, allowRemote, prefersLocal, useDepot, useNixpacks bool, useManagedBuilder bool) DockerDaemonType { daemonType := DockerDaemonTypeNone if allowLocal { daemonType = daemonType | DockerDaemonTypeLocal } - if allowRemote { + if allowRemote || useManagedBuilder { daemonType = daemonType | DockerDaemonTypeRemote } - if useDepot { + if useDepot && !useManagedBuilder { daemonType = daemonType | DockerDaemonTypeDepot } if useNixpacks { @@ -125,6 +157,9 @@ func NewDockerDaemonType(allowLocal, allowRemote, prefersLocal, useDepot, useNix if prefersLocal && !useDepot { daemonType = daemonType | DockerDaemonTypePrefersLocal } + if useManagedBuilder { + daemonType = daemonType | DockerDaemonTypeManaged + } return daemonType } @@ -137,6 +172,7 @@ const ( DockerDaemonTypePrefersLocal DockerDaemonTypeNixpacks DockerDaemonTypeDepot + DockerDaemonTypeManaged ) func (t DockerDaemonType) String() string { @@ -157,6 +193,9 @@ func (t DockerDaemonType) String() string { if t&DockerDaemonTypeDepot != 0 { strs = append(strs, "depot") } + if t&DockerDaemonTypeManaged != 0 { + strs = append(strs, "managed") + } if len(strs) == 0 { return "none" } @@ -192,6 +231,10 @@ func (t DockerDaemonType) UseDepot() bool { return (t & DockerDaemonTypeDepot) != 0 } +func (t DockerDaemonType) UseManagedBuilder() bool { + return (t & DockerDaemonTypeManaged) != 0 +} + func (t DockerDaemonType) PrefersLocal() bool { return (t & DockerDaemonTypePrefersLocal) != 0 } @@ -220,7 +263,7 @@ func logClearLinesAbove(streams *iostreams.IOStreams, count int) { } } -func newRemoteDockerClient(ctx context.Context, apiClient flyutil.Client, appName string, streams *iostreams.IOStreams, build *build, cachedClient *dockerclient.Client, connectOverWireguard, recreateBuilder bool) (c *dockerclient.Client, err error) { +func newRemoteDockerClient(ctx context.Context, apiClient flyutil.Client, appName string, streams *iostreams.IOStreams, build *build, cachedClient *dockerclient.Client, connectOverWireguard bool, builderApp *fly.App, builderMachine *fly.Machine) (c *dockerclient.Client, err error) { ctx, span := tracing.GetTracer().Start(ctx, "build_remote_docker_client", trace.WithAttributes( attribute.Bool("connect_over_wireguard", connectOverWireguard), )) @@ -239,9 +282,8 @@ func newRemoteDockerClient(ctx context.Context, apiClient flyutil.Client, appNam }() var host string - var app *fly.App - var machine *fly.Machine - machine, app, err = remoteBuilderMachine(ctx, apiClient, appName, recreateBuilder) + app := builderApp + machine := builderMachine if err != nil { tracing.RecordError(span, err, "failed to init remote builder machine") return nil, err @@ -287,6 +329,8 @@ func newRemoteDockerClient(ctx context.Context, apiClient flyutil.Client, appNam return nil, err } + _ = appsecrets.DeleteMinvers(ctx, app.Name) + fmt.Fprintln(streams.Out, streams.ColorScheme().Yellow("🔧 creating fresh remote builder, (this might take a while ...)")) machine, app, err = remoteBuilderMachine(ctx, apiClient, appName, false) if err != nil { @@ -646,10 +690,12 @@ func registryAuth(token string) registry.AuthConfig { func authConfigs(token string) map[string]registry.AuthConfig { targetRegistry := viper.GetString(flyctl.ConfigRegistryHost) + mirrorRegistry := net.JoinHostPort(machinesapi.InternalURL.Hostname(), "5000") authConfigs := 
map[string]registry.AuthConfig{} authConfigs[targetRegistry] = registryAuth(token) + authConfigs[mirrorRegistry] = registryAuth(token) dockerhubUsername := os.Getenv("DOCKER_HUB_USERNAME") dockerhubPassword := os.Getenv("DOCKER_HUB_PASSWORD") @@ -720,8 +766,8 @@ func EagerlyEnsureRemoteBuilder(ctx context.Context, apiClient flyutil.Client, o return } - region := os.Getenv("FLY_REMOTE_BUILDER_REGION") - _, app, err := EnsureBuilder(ctx, org, region, recreateBuilder) + provisioner := NewProvisioner(org) + _, app, err := provisioner.EnsureBuilder(ctx, os.Getenv("FLY_REMOTE_BUILDER_REGION"), recreateBuilder) if err != nil { terminal.Debugf("error ensuring remote builder for organization: %s", err) return @@ -735,12 +781,26 @@ func remoteBuilderMachine(ctx context.Context, apiClient flyutil.Client, appName return nil, nil, nil } + org, err := apiClient.GetOrganizationByApp(ctx, appName) + if err != nil { + return nil, nil, err + } + provisioner := NewProvisioner(org) + builderMachine, builderApp, err := provisioner.EnsureBuilder(ctx, os.Getenv("FLY_REMOTE_BUILDER_REGION"), recreateBuilder) + return builderMachine, builderApp, err +} + +func remoteManagedBuilderMachine(ctx context.Context, apiClient flyutil.Client, appName string) (*fly.Machine, *fly.App, error) { + if v := os.Getenv("FLY_REMOTE_BUILDER_HOST"); v != "" { + return nil, nil, nil + } + region := os.Getenv("FLY_REMOTE_BUILDER_REGION") org, err := apiClient.GetOrganizationByApp(ctx, appName) if err != nil { return nil, nil, err } - builderMachine, builderApp, err := EnsureBuilder(ctx, org, region, recreateBuilder) + builderMachine, builderApp, err := EnsureFlyManagedBuilder(ctx, org, region) return builderMachine, builderApp, err } diff --git a/internal/build/imgsrc/docker_test.go b/internal/build/imgsrc/docker_test.go index 6f115e6560..da632976b7 100644 --- a/internal/build/imgsrc/docker_test.go +++ b/internal/build/imgsrc/docker_test.go @@ -8,26 +8,30 @@ import ( func TestAllowedDockerDaemonMode(t *testing.T) { tests := []struct { - allowLocal bool - allowRemote bool - preferslocal bool - useDepot bool - useNixpacks bool - expected DockerDaemonType + allowLocal bool + allowRemote bool + preferslocal bool + useDepot bool + useNixpacks bool + useManagedBuilder bool + expected DockerDaemonType }{ - {false, false, false, false, false, DockerDaemonTypeNone}, - {false, false, true, false, false, DockerDaemonTypeNone | DockerDaemonTypePrefersLocal}, - {false, true, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeRemote}, - {false, true, true, false, false, DockerDaemonTypeNone | DockerDaemonTypeRemote | DockerDaemonTypePrefersLocal}, - {true, false, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal}, - {true, false, true, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypePrefersLocal}, - {true, true, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypeRemote}, - {true, true, true, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypeRemote | DockerDaemonTypePrefersLocal}, - {true, true, false, true, false, DockerDaemonTypeNone | DockerDaemonTypeDepot | DockerDaemonTypeRemote | DockerDaemonTypeLocal}, + {false, false, false, false, false, false, DockerDaemonTypeNone}, + {false, false, true, false, false, false, DockerDaemonTypeNone | DockerDaemonTypePrefersLocal}, + {false, true, false, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeRemote}, + {false, true, true, false, false, false, DockerDaemonTypeNone | 
DockerDaemonTypeRemote | DockerDaemonTypePrefersLocal}, + {true, false, false, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal}, + {true, false, true, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypePrefersLocal}, + {true, true, false, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypeRemote}, + {true, true, true, false, false, false, DockerDaemonTypeNone | DockerDaemonTypeLocal | DockerDaemonTypeRemote | DockerDaemonTypePrefersLocal}, + {true, true, false, true, false, false, DockerDaemonTypeNone | DockerDaemonTypeDepot | DockerDaemonTypeRemote | DockerDaemonTypeLocal}, + {true, true, false, false, false, true, DockerDaemonTypeNone | DockerDaemonTypeRemote | DockerDaemonTypeLocal | DockerDaemonTypeManaged}, + {true, true, false, true, false, true, DockerDaemonTypeNone | DockerDaemonTypeRemote | DockerDaemonTypeLocal | DockerDaemonTypeManaged}, + {false, false, false, false, false, true, DockerDaemonTypeNone | DockerDaemonTypeRemote | DockerDaemonTypeManaged}, } for _, test := range tests { - m := NewDockerDaemonType(test.allowLocal, test.allowRemote, test.preferslocal, test.useDepot, test.useNixpacks) + m := NewDockerDaemonType(test.allowLocal, test.allowRemote, test.preferslocal, test.useDepot, test.useNixpacks, test.useManagedBuilder) assert.Equal(t, test.expected, m) } } diff --git a/internal/build/imgsrc/dockerfile_builder.go b/internal/build/imgsrc/dockerfile_builder.go index 98ab4891b9..78c950bbbb 100644 --- a/internal/build/imgsrc/dockerfile_builder.go +++ b/internal/build/imgsrc/dockerfile_builder.go @@ -24,7 +24,6 @@ import ( "github.com/moby/buildkit/client" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/session/secrets/secretsprovider" - "github.com/moby/buildkit/util/progress/progressui" "github.com/pkg/errors" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/buildinfo" @@ -492,19 +491,7 @@ func runBuildKitBuild(ctx context.Context, docker *dockerclient.Client, opts Ima // Build the image. statusCh := make(chan *client.SolveStatus) eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - var err error - - display, err := progressui.NewDisplay(os.Stderr, "auto") - if err != nil { - return err - } - // Don't use `ctx` here. - // Cancelling the context kills the reader of statusCh which blocks bc.Solve below. - // bc.Solve closes statusCh at the end and UpdateFrom returns by reading the closed channel. - _, err = display.UpdateFrom(context.Background(), statusCh) - return err - }) + eg.Go(newDisplay(statusCh)) var res *client.SolveResponse eg.Go(func() error { options := solveOptFromImageOptions(opts, dockerfilePath, buildArgs) @@ -516,7 +503,9 @@ func runBuildKitBuild(ctx context.Context, docker *dockerclient.Client, opts Ima options.Session, // To pull images from local Docker Engine with Fly's access token, // we need to pass the provider. Remote builders don't need that. 
- newBuildkitAuthProvider(config.Tokens(ctx).Docker()), + newBuildkitAuthProvider(func() string { + return config.Tokens(ctx).Docker() + }), secretsprovider.FromMap(secrets), ) diff --git a/internal/build/imgsrc/ensure_builder.go b/internal/build/imgsrc/ensure_builder.go index 0893c3c73d..c3438cbc6b 100644 --- a/internal/build/imgsrc/ensure_builder.go +++ b/internal/build/imgsrc/ensure_builder.go @@ -7,18 +7,83 @@ import ( "strings" "time" - "github.com/samber/lo" "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/haikunator" "github.com/superfly/flyctl/internal/tracing" + "github.com/superfly/flyctl/internal/uiexutil" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) -func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, recreateBuilder bool) (*fly.Machine, *fly.App, error) { +type Provisioner struct { + org *fly.Organization + useVolume bool + buildkitAddr string + buildkitImage string +} + +func NewProvisioner(org *fly.Organization) *Provisioner { + return &Provisioner{ + org: org, + useVolume: true, + } +} + +func NewBuildkitProvisioner(org *fly.Organization, addr, image string) *Provisioner { + return &Provisioner{ + org: org, + useVolume: true, + buildkitAddr: addr, + buildkitImage: image, + } +} + +func (p *Provisioner) UseBuildkit() bool { + return p.buildkitAddr != "" || p.buildkitImage != "" +} + +const defaultImage = "docker-hub-mirror.fly.io/flyio/rchab:sha-9346699" +const DefaultBuildkitImage = "docker-hub-mirror.fly.io/flyio/buildkit@sha256:0fe49e6f506f0961cb2fc45d56171df0e852229facf352f834090345658b7e1c" + +func (p *Provisioner) image() string { + if p.buildkitImage != "" { + return p.buildkitImage + } + if p.org.RemoteBuilderImage != "" { + return p.org.RemoteBuilderImage + } + return defaultImage +} + +func appToAppCompact(app *fly.App) *fly.AppCompact { + if app == nil { + return nil + } + return &fly.AppCompact{ + ID: app.ID, + Name: app.Name, + Status: app.Status, + Deployed: app.Deployed, + Hostname: app.Hostname, + AppURL: app.AppURL, + Organization: &fly.OrganizationBasic{ + ID: app.Organization.ID, + Name: app.Organization.Name, + Slug: app.Organization.Slug, + RawSlug: app.Organization.RawSlug, + PaidPlan: app.Organization.PaidPlan, + }, + PlatformVersion: app.PlatformVersion, + PostgresAppRole: app.PostgresAppRole, + } +} + +func (p *Provisioner) EnsureBuilder(ctx context.Context, region string, recreateBuilder bool) (*fly.Machine, *fly.App, error) { + org := p.org ctx, span := tracing.GetTracer().Start(ctx, "ensure_builder") defer span.End() @@ -27,26 +92,9 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re if builderApp != nil { span.SetAttributes(attribute.String("builder_app", builderApp.Name)) flaps, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppName: builderApp.Name, - // TOOD(billy) make a utility function for App -> AppCompact - AppCompact: &fly.AppCompact{ - ID: builderApp.ID, - Name: builderApp.Name, - Status: builderApp.Status, - Deployed: builderApp.Deployed, - Hostname: builderApp.Hostname, - AppURL: builderApp.AppURL, - Organization: &fly.OrganizationBasic{ - ID: builderApp.Organization.ID, - Name: builderApp.Organization.Name, - Slug: builderApp.Organization.Slug, - RawSlug: builderApp.Organization.RawSlug, - PaidPlan: builderApp.Organization.PaidPlan, - }, 
- PlatformVersion: builderApp.PlatformVersion, - PostgresAppRole: builderApp.PostgresAppRole, - }, - OrgSlug: builderApp.Organization.Slug, + AppName: builderApp.Name, + AppCompact: appToAppCompact(builderApp), + OrgSlug: builderApp.Organization.Slug, }) if err != nil { tracing.RecordError(span, err, "error creating flaps client") @@ -55,7 +103,7 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re ctx = flapsutil.NewContextWithClient(ctx, flaps) } - builderMachine, err := validateBuilder(ctx, builderApp) + builderMachine, err := p.validateBuilder(ctx, builderApp) if err == nil { span.AddEvent("builder app already exists and is valid") return builderMachine, builderApp, nil @@ -88,6 +136,8 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re tracing.RecordError(span, err, "error deleting invalid builder app") return nil, nil, err } + + _ = appsecrets.DeleteMinvers(ctx, builderApp.Name) } } else { span.AddEvent("recreating builder") @@ -98,6 +148,8 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re tracing.RecordError(span, err, "error deleting existing builder app") return nil, nil, err } + + _ = appsecrets.DeleteMinvers(ctx, org.RemoteBuilderApp.Name) } } @@ -113,7 +165,7 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re return nil, nil, err } ctx = flapsutil.NewContextWithClient(ctx, flapsClient) - app, machine, err := createBuilder(ctx, org, region, builderName) + app, machine, err := p.createBuilder(ctx, region, builderName) if err != nil { tracing.RecordError(span, err, "error creating builder") return nil, nil, err @@ -121,6 +173,18 @@ func EnsureBuilder(ctx context.Context, org *fly.Organization, region string, re return machine, app, nil } +func EnsureFlyManagedBuilder(ctx context.Context, org *fly.Organization, region string) (*fly.Machine, *fly.App, error) { + ctx, span := tracing.GetTracer().Start(ctx, "ensure_fly_managed_builder") + defer span.End() + + app, machine, err := createFlyManagedBuilder(ctx, org, region) + if err != nil { + tracing.RecordError(span, err, "error creating fly managed builder") + return nil, nil, err + } + return machine, app, nil +} + type ValidateBuilderError int func (e ValidateBuilderError) Error() string { @@ -146,9 +210,31 @@ const ( InvalidMachineCount BuilderMachineNotStarted ShouldReplaceBuilderMachine + + buildkitGRPCPort = 1234 ) -func validateBuilder(ctx context.Context, app *fly.App) (*fly.Machine, error) { +// validateBuilder returns a machine if it is available for building images. +func (p *Provisioner) validateBuilder(ctx context.Context, app *fly.App) (*fly.Machine, error) { + machine, err := p.validateBuilderMachine(ctx, app) + if err != nil { + // validateBuilderMachine returns a machine even if there is an error. + return machine, err + } + + // Don't run extra checks for non-Buildkit cases. + if !p.UseBuildkit() { + return machine, nil + } + + // If not, make sure the machine is configured for Buildkit. 
+ if len(machine.Config.Services) == 1 && machine.Config.Services[0].InternalPort == buildkitGRPCPort { + return machine, nil + } + return nil, ShouldReplaceBuilderMachine +} + +func (p *Provisioner) validateBuilderMachine(ctx context.Context, app *fly.App) (*fly.Machine, error) { var builderAppName string if app != nil { builderAppName = app.Name @@ -163,9 +249,11 @@ func validateBuilder(ctx context.Context, app *fly.App) (*fly.Machine, error) { flapsClient := flapsutil.ClientFromContext(ctx) - if _, err := validateBuilderVolumes(ctx, flapsClient); err != nil { - tracing.RecordError(span, err, "error validating builder volumes") - return nil, err + if p.useVolume { + if _, err := validateBuilderVolumes(ctx, flapsClient); err != nil { + tracing.RecordError(span, err, "error validating builder volumes") + return nil, err + } } machine, err := validateBuilderMachines(ctx, flapsClient) if err != nil { @@ -179,7 +267,6 @@ func validateBuilder(ctx context.Context, app *fly.App) (*fly.Machine, error) { } return machine, nil - } func validateBuilderVolumes(ctx context.Context, flapsClient flapsutil.FlapsClient) (*fly.Volume, error) { @@ -262,7 +349,10 @@ func validateBuilderMachines(ctx context.Context, flapsClient flapsutil.FlapsCli return machines[0], nil } -func createBuilder(ctx context.Context, org *fly.Organization, region, builderName string) (app *fly.App, mach *fly.Machine, retErr error) { +func (p *Provisioner) createBuilder(ctx context.Context, region, builderName string) (app *fly.App, mach *fly.Machine, retErr error) { + buildkit := p.UseBuildkit() + + org := p.org ctx, span := tracing.GetTracer().Start(ctx, "create_builder") defer span.End() @@ -285,13 +375,22 @@ func createBuilder(ctx context.Context, org *fly.Organization, region, builderNa if retErr != nil { span.AddEvent("cleaning up new builder app due to error") client.DeleteApp(ctx, builderName) + _ = appsecrets.DeleteMinvers(ctx, builderName) } }() - _, retErr = client.AllocateIPAddress(ctx, app.Name, "shared_v4", "", org, "") - if retErr != nil { - tracing.RecordError(span, retErr, "error allocating ip address") - return nil, nil, retErr + if buildkit { + _, retErr = client.AllocateIPAddress(ctx, app.Name, "private_v6", "", org, "") + if retErr != nil { + tracing.RecordError(span, retErr, "error allocating ip address") + return nil, nil, retErr + } + } else { + _, retErr = client.AllocateIPAddress(ctx, app.Name, "shared_v4", "", org, "") + if retErr != nil { + tracing.RecordError(span, retErr, "error allocating ip address") + return nil, nil, retErr + } } guest := fly.MachineGuest{ @@ -313,91 +412,120 @@ func createBuilder(ctx context.Context, org *fly.Organization, region, builderNa return nil, nil, fmt.Errorf("waiting for app %s: %w", app.Name, retErr) } - var volume *fly.Volume - numRetries := 0 - for { - volume, retErr = flapsClient.CreateVolume(ctx, fly.CreateVolumeRequest{ - Name: "machine_data", - SizeGb: fly.IntPointer(50), - AutoBackupEnabled: fly.BoolPointer(false), - ComputeRequirements: &guest, - Region: region, - }) - if retErr == nil { - break + config := &fly.MachineConfig{ + Env: map[string]string{ + "ALLOW_ORG_SLUG": org.Slug, + "LOG_LEVEL": "debug", + }, + Guest: &guest, + Image: p.image(), + } + + if buildkit { + config.Services = []fly.MachineService{ + { + InternalPort: 1234, + Ports: []fly.MachinePort{{Port: fly.IntPointer(1234)}}, + Autostart: fly.BoolPointer(true), + Autostop: fly.Pointer(fly.MachineAutostopStop), + }, } + } else { + config.Services = []fly.MachineService{ + { + Protocol: "tcp", + 
InternalPort: 8080, + Autostop: fly.Pointer(fly.MachineAutostopOff), + Autostart: fly.BoolPointer(true), + MinMachinesRunning: fly.IntPointer(0), + Ports: []fly.MachinePort{ + { + Port: fly.IntPointer(80), + Handlers: []string{"http"}, + ForceHTTPS: true, + HTTPOptions: &fly.HTTPOptions{ + H2Backend: fly.BoolPointer(true), + }, + }, + { + Port: fly.IntPointer(443), + Handlers: []string{"http", "tls"}, + TLSOptions: &fly.TLSOptions{ + ALPN: []string{"h2"}, + }, + HTTPOptions: &fly.HTTPOptions{ + H2Backend: fly.BoolPointer(true), + }, + }, + }, + ForceInstanceKey: nil, + }, + } + } - var flapsErr *flaps.FlapsError - if errors.As(retErr, &flapsErr) && flapsErr.ResponseStatusCode >= 500 && flapsErr.ResponseStatusCode < 600 { - span.AddEvent(fmt.Sprintf("non-server error %d", flapsErr.ResponseStatusCode)) - numRetries += 1 + if p.useVolume { + var volume *fly.Volume + numRetries := 0 + for { + volume, retErr = flapsClient.CreateVolume(ctx, fly.CreateVolumeRequest{ + Name: "machine_data", + SizeGb: fly.IntPointer(50), + AutoBackupEnabled: fly.BoolPointer(false), + ComputeRequirements: &guest, + Region: region, + }) + if retErr == nil { + region = volume.Region + break + } - if numRetries >= 5 { + var flapsErr *flaps.FlapsError + if errors.As(retErr, &flapsErr) && flapsErr.ResponseStatusCode >= 500 && flapsErr.ResponseStatusCode < 600 { + span.AddEvent(fmt.Sprintf("non-server error %d", flapsErr.ResponseStatusCode)) + numRetries += 1 + + if numRetries >= 5 { + tracing.RecordError(span, retErr, "error creating volume") + return nil, nil, retErr + } + time.Sleep(1 * time.Second) + } else { tracing.RecordError(span, retErr, "error creating volume") return nil, nil, retErr } - time.Sleep(1 * time.Second) - } else { - tracing.RecordError(span, retErr, "error creating volume") - return nil, nil, retErr } - } - defer func() { - if retErr != nil { - span.AddEvent("cleaning up new volume due to error") - flapsClient.DeleteVolume(ctx, volume.ID) + defer func() { + if retErr != nil { + span.AddEvent("cleaning up new volume due to error") + flapsClient.DeleteVolume(ctx, volume.ID) + } + }() + + if buildkit { + config.Mounts = append(config.Mounts, fly.MachineMount{ + Path: "/var/lib/buildkit", + Volume: volume.ID, + Name: app.Name, + }) + } else { + config.Env["DATA_DIR"] = "/data" + config.Mounts = append(config.Mounts, fly.MachineMount{ + Path: "/data", + Volume: volume.ID, + Name: app.Name, + }) } - }() + } + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return nil, nil, err + } mach, retErr = flapsClient.Launch(ctx, fly.LaunchMachineInput{ - Region: region, - Config: &fly.MachineConfig{ - Env: map[string]string{ - "ALLOW_ORG_SLUG": org.Slug, - "DATA_DIR": "/data", - "LOG_LEVEL": "debug", - }, - Guest: &guest, - Mounts: []fly.MachineMount{ - { - Path: "/data", - Volume: volume.ID, - Name: app.Name, - }, - }, - Services: []fly.MachineService{ - { - Protocol: "tcp", - InternalPort: 8080, - Autostop: fly.Pointer(fly.MachineAutostopOff), - Autostart: fly.BoolPointer(true), - MinMachinesRunning: fly.IntPointer(0), - Ports: []fly.MachinePort{ - { - Port: fly.IntPointer(80), - Handlers: []string{"http"}, - ForceHTTPS: true, - HTTPOptions: &fly.HTTPOptions{ - H2Backend: fly.BoolPointer(true), - }, - }, - { - Port: fly.IntPointer(443), - Handlers: []string{"http", "tls"}, - TLSOptions: &fly.TLSOptions{ - ALPN: []string{"h2"}, - }, - HTTPOptions: &fly.HTTPOptions{ - H2Backend: fly.BoolPointer(true), - }, - }, - }, - ForceInstanceKey: nil, - }, - }, - Image: lo.Ternary(org.RemoteBuilderImage != 
"", org.RemoteBuilderImage, "docker-hub-mirror.fly.io/flyio/rchab:sha-9346699"), - }, + Region: region, + Config: config, + MinSecretsVersion: minvers, }) if retErr != nil { tracing.RecordError(span, retErr, "error launching builder machine") @@ -413,6 +541,29 @@ func createBuilder(ctx context.Context, org *fly.Organization, region, builderNa return } +func createFlyManagedBuilder(ctx context.Context, org *fly.Organization, region string) (app *fly.App, mach *fly.Machine, retErr error) { + ctx, span := tracing.GetTracer().Start(ctx, "create_builder") + defer span.End() + + uiexClient := uiexutil.ClientFromContext(ctx) + + response, error := uiexClient.CreateFlyManagedBuilder(ctx, org.Slug, region) + if error != nil { + tracing.RecordError(span, retErr, "error creating managed builder") + return nil, nil, retErr + } + + builderApp := &fly.App{ + Name: response.Data.AppName, + } + + machine := &fly.Machine{ + ID: response.Data.MachineID, + } + + return builderApp, machine, nil +} + func restartBuilderMachine(ctx context.Context, builderMachine *fly.Machine) error { ctx, span := tracing.GetTracer().Start(ctx, "restart_builder_machine") defer span.End() diff --git a/internal/build/imgsrc/ensure_builder_test.go b/internal/build/imgsrc/ensure_builder_test.go index b829086b25..0576f988b0 100644 --- a/internal/build/imgsrc/ensure_builder_test.go +++ b/internal/build/imgsrc/ensure_builder_test.go @@ -13,11 +13,21 @@ import ( "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/mock" + "github.com/superfly/flyctl/internal/state" + "go.uber.org/mock/gomock" ) -func TestValidateBuilder(t *testing.T) { - t.Parallel() +//go:generate go run go.uber.org/mock/mockgen -package imgsrc -destination flaps_mock_test.go github.com/superfly/flyctl/internal/flapsutil FlapsClient + +func testingContext(t *testing.T) context.Context { ctx := context.Background() + ctx = state.WithConfigDirectory(ctx, t.TempDir()) + return ctx +} + +func TestValidateBuilder(t *testing.T) { + ctx := testingContext(t) + p := NewProvisioner(&fly.Organization{}) hasVolumes := false hasMachines := false @@ -44,24 +54,24 @@ func TestValidateBuilder(t *testing.T) { } ctx = flapsutil.NewContextWithClient(ctx, &flapsClient) - _, err := validateBuilder(ctx, nil) + _, err := p.validateBuilder(ctx, nil) assert.EqualError(t, err, NoBuilderApp.Error()) - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.EqualError(t, err, NoBuilderVolume.Error()) hasVolumes = true - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.EqualError(t, err, InvalidMachineCount.Error()) hasMachines = true - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.NoError(t, err) } func TestValidateBuilderAPIErrors(t *testing.T) { - t.Parallel() - ctx := context.Background() + ctx := testingContext(t) + p := NewProvisioner(&fly.Organization{}) maxVolumeRetries := 3 volumeRetries := 0 @@ -110,18 +120,18 @@ func TestValidateBuilderAPIErrors(t *testing.T) { ctx = flapsutil.NewContextWithClient(ctx, &flapsClient) volumesShouldFail = true - _, err := validateBuilder(ctx, &fly.App{}) + _, err := p.validateBuilder(ctx, &fly.App{}) assert.NoError(t, err) volumeRetries = 0 maxVolumeRetries = 7 - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.Error(t, err) volumeRetries = 0 responseStatusCode = 404 // we should only try once if the 
error is not a server error - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) var flapsErr *flaps.FlapsError assert.True(t, errors.As(err, &flapsErr)) assert.Equal(t, 404, flapsErr.ResponseStatusCode) @@ -130,29 +140,49 @@ func TestValidateBuilderAPIErrors(t *testing.T) { volumesShouldFail = false machinesShouldFail = true responseStatusCode = 500 - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.NoError(t, err) machineRetries = 0 maxMachineRetries = 7 - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.Error(t, err) machineRetries = 0 responseStatusCode = 404 // we should only try once if the error is not a server error - _, err = validateBuilder(ctx, &fly.App{}) + _, err = p.validateBuilder(ctx, &fly.App{}) assert.True(t, errors.As(err, &flapsErr)) assert.Equal(t, 404, flapsErr.ResponseStatusCode) assert.Equal(t, 1, machineRetries) } +func TestValidateBuilderNotStarted(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := NewMockFlapsClient(ctrl) + + ctx := testingContext(t) + ctx = flapsutil.NewContextWithClient(ctx, client) + + provisioner := NewProvisioner(&fly.Organization{}) + provisioner.useVolume = false + + client.EXPECT().List(gomock.Any(), gomock.Any()).Return([]*fly.Machine{ + {State: "stopped"}, + }, nil) + machine, err := provisioner.validateBuilder(ctx, &fly.App{}) + assert.ErrorIs(t, err, BuilderMachineNotStarted) + assert.NotNil(t, machine, "Go functions usually return either a value or an error, but this is not") +} + func TestCreateBuilder(t *testing.T) { - t.Parallel() - ctx := context.Background() + ctx := testingContext(t) org := &fly.Organization{ Slug: "bigorg", } + p := NewProvisioner(org) createAppShouldFail := false allocateIPAddressShouldFail := false @@ -225,44 +255,44 @@ func TestCreateBuilder(t *testing.T) { ctx = flyutil.NewContextWithClient(ctx, &apiClient) ctx = flapsutil.NewContextWithClient(ctx, &flapsClient) - app, machine, err := createBuilder(ctx, org, "ord", "builder") + app, machine, err := p.createBuilder(ctx, "ord", "builder") assert.NoError(t, err) assert.Equal(t, "bigmachine", machine.ID) assert.Equal(t, app.Name, "builder") createAppShouldFail = true - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.Error(t, err) createAppShouldFail = false allocateIPAddressShouldFail = true - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.Error(t, err) allocateIPAddressShouldFail = false waitForAppShouldFail = true - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.Error(t, err) waitForAppShouldFail = false createVolumeShouldFail = true - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.NoError(t, err) createVolumeAttempts = 0 maxCreateVolumeAttempts = 7 - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.Error(t, err) createVolumeShouldFail = false launchShouldFail = true - _, _, err = createBuilder(ctx, org, "ord", "builder") + _, _, err = p.createBuilder(ctx, "ord", "builder") assert.Error(t, err) } func TestRestartBuilderMachine(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx := testingContext(t) couldNotReserveResources := false flapsClient := 
mock.FlapsClient{ diff --git a/internal/build/imgsrc/flaps_mock_test.go b/internal/build/imgsrc/flaps_mock_test.go new file mode 100644 index 0000000000..dfec7fa945 --- /dev/null +++ b/internal/build/imgsrc/flaps_mock_test.go @@ -0,0 +1,706 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/superfly/flyctl/internal/flapsutil (interfaces: FlapsClient) +// +// Generated by this command: +// +// mockgen -package imgsrc -destination flaps_mock_test.go github.com/superfly/flyctl/internal/flapsutil FlapsClient +// + +// Package imgsrc is a generated GoMock package. +package imgsrc + +import ( + context "context" + http "net/http" + reflect "reflect" + time "time" + + fly "github.com/superfly/fly-go" + gomock "go.uber.org/mock/gomock" +) + +// MockFlapsClient is a mock of FlapsClient interface. +type MockFlapsClient struct { + ctrl *gomock.Controller + recorder *MockFlapsClientMockRecorder + isgomock struct{} +} + +// MockFlapsClientMockRecorder is the mock recorder for MockFlapsClient. +type MockFlapsClientMockRecorder struct { + mock *MockFlapsClient +} + +// NewMockFlapsClient creates a new mock instance. +func NewMockFlapsClient(ctrl *gomock.Controller) *MockFlapsClient { + mock := &MockFlapsClient{ctrl: ctrl} + mock.recorder = &MockFlapsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFlapsClient) EXPECT() *MockFlapsClientMockRecorder { + return m.recorder +} + +// AcquireLease mocks base method. +func (m *MockFlapsClient) AcquireLease(ctx context.Context, machineID string, ttl *int) (*fly.MachineLease, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireLease", ctx, machineID, ttl) + ret0, _ := ret[0].(*fly.MachineLease) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AcquireLease indicates an expected call of AcquireLease. +func (mr *MockFlapsClientMockRecorder) AcquireLease(ctx, machineID, ttl any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireLease", reflect.TypeOf((*MockFlapsClient)(nil).AcquireLease), ctx, machineID, ttl) +} + +// Cordon mocks base method. +func (m *MockFlapsClient) Cordon(ctx context.Context, machineID, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cordon", ctx, machineID, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Cordon indicates an expected call of Cordon. +func (mr *MockFlapsClientMockRecorder) Cordon(ctx, machineID, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cordon", reflect.TypeOf((*MockFlapsClient)(nil).Cordon), ctx, machineID, nonce) +} + +// CreateApp mocks base method. +func (m *MockFlapsClient) CreateApp(ctx context.Context, name, org string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateApp", ctx, name, org) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateApp indicates an expected call of CreateApp. +func (mr *MockFlapsClientMockRecorder) CreateApp(ctx, name, org any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateApp", reflect.TypeOf((*MockFlapsClient)(nil).CreateApp), ctx, name, org) +} + +// CreateVolume mocks base method. 
+func (m *MockFlapsClient) CreateVolume(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateVolume", ctx, req) + ret0, _ := ret[0].(*fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateVolume indicates an expected call of CreateVolume. +func (mr *MockFlapsClientMockRecorder) CreateVolume(ctx, req any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockFlapsClient)(nil).CreateVolume), ctx, req) +} + +// CreateVolumeSnapshot mocks base method. +func (m *MockFlapsClient) CreateVolumeSnapshot(ctx context.Context, volumeId string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateVolumeSnapshot", ctx, volumeId) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateVolumeSnapshot indicates an expected call of CreateVolumeSnapshot. +func (mr *MockFlapsClientMockRecorder) CreateVolumeSnapshot(ctx, volumeId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolumeSnapshot", reflect.TypeOf((*MockFlapsClient)(nil).CreateVolumeSnapshot), ctx, volumeId) +} + +// DeleteAppSecret mocks base method. +func (m *MockFlapsClient) DeleteAppSecret(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAppSecret", ctx, name) + ret0, _ := ret[0].(*fly.DeleteAppSecretResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteAppSecret indicates an expected call of DeleteAppSecret. +func (mr *MockFlapsClientMockRecorder) DeleteAppSecret(ctx, name any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAppSecret", reflect.TypeOf((*MockFlapsClient)(nil).DeleteAppSecret), ctx, name) +} + +// DeleteMetadata mocks base method. +func (m *MockFlapsClient) DeleteMetadata(ctx context.Context, machineID, key string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteMetadata", ctx, machineID, key) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteMetadata indicates an expected call of DeleteMetadata. +func (mr *MockFlapsClientMockRecorder) DeleteMetadata(ctx, machineID, key any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMetadata", reflect.TypeOf((*MockFlapsClient)(nil).DeleteMetadata), ctx, machineID, key) +} + +// DeleteSecretKey mocks base method. +func (m *MockFlapsClient) DeleteSecretKey(ctx context.Context, name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSecretKey", ctx, name) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSecretKey indicates an expected call of DeleteSecretKey. +func (mr *MockFlapsClientMockRecorder) DeleteSecretKey(ctx, name any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecretKey", reflect.TypeOf((*MockFlapsClient)(nil).DeleteSecretKey), ctx, name) +} + +// DeleteVolume mocks base method. +func (m *MockFlapsClient) DeleteVolume(ctx context.Context, volumeId string) (*fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteVolume", ctx, volumeId) + ret0, _ := ret[0].(*fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteVolume indicates an expected call of DeleteVolume. 
+func (mr *MockFlapsClientMockRecorder) DeleteVolume(ctx, volumeId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockFlapsClient)(nil).DeleteVolume), ctx, volumeId) +} + +// Destroy mocks base method. +func (m *MockFlapsClient) Destroy(ctx context.Context, input fly.RemoveMachineInput, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Destroy", ctx, input, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Destroy indicates an expected call of Destroy. +func (mr *MockFlapsClientMockRecorder) Destroy(ctx, input, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Destroy", reflect.TypeOf((*MockFlapsClient)(nil).Destroy), ctx, input, nonce) +} + +// Exec mocks base method. +func (m *MockFlapsClient) Exec(ctx context.Context, machineID string, in *fly.MachineExecRequest) (*fly.MachineExecResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exec", ctx, machineID, in) + ret0, _ := ret[0].(*fly.MachineExecResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exec indicates an expected call of Exec. +func (mr *MockFlapsClientMockRecorder) Exec(ctx, machineID, in any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockFlapsClient)(nil).Exec), ctx, machineID, in) +} + +// ExtendVolume mocks base method. +func (m *MockFlapsClient) ExtendVolume(ctx context.Context, volumeId string, size_gb int) (*fly.Volume, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExtendVolume", ctx, volumeId, size_gb) + ret0, _ := ret[0].(*fly.Volume) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ExtendVolume indicates an expected call of ExtendVolume. +func (mr *MockFlapsClientMockRecorder) ExtendVolume(ctx, volumeId, size_gb any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExtendVolume", reflect.TypeOf((*MockFlapsClient)(nil).ExtendVolume), ctx, volumeId, size_gb) +} + +// FindLease mocks base method. +func (m *MockFlapsClient) FindLease(ctx context.Context, machineID string) (*fly.MachineLease, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindLease", ctx, machineID) + ret0, _ := ret[0].(*fly.MachineLease) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindLease indicates an expected call of FindLease. +func (mr *MockFlapsClientMockRecorder) FindLease(ctx, machineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindLease", reflect.TypeOf((*MockFlapsClient)(nil).FindLease), ctx, machineID) +} + +// GenerateSecretKey mocks base method. +func (m *MockFlapsClient) GenerateSecretKey(ctx context.Context, name, typ string) (*fly.SetSecretKeyResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenerateSecretKey", ctx, name, typ) + ret0, _ := ret[0].(*fly.SetSecretKeyResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GenerateSecretKey indicates an expected call of GenerateSecretKey. +func (mr *MockFlapsClientMockRecorder) GenerateSecretKey(ctx, name, typ any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateSecretKey", reflect.TypeOf((*MockFlapsClient)(nil).GenerateSecretKey), ctx, name, typ) +} + +// Get mocks base method. 
+func (m *MockFlapsClient) Get(ctx context.Context, machineID string) (*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, machineID) + ret0, _ := ret[0].(*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockFlapsClientMockRecorder) Get(ctx, machineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockFlapsClient)(nil).Get), ctx, machineID) +} + +// GetAllVolumes mocks base method. +func (m *MockFlapsClient) GetAllVolumes(ctx context.Context) ([]fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllVolumes", ctx) + ret0, _ := ret[0].([]fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAllVolumes indicates an expected call of GetAllVolumes. +func (mr *MockFlapsClientMockRecorder) GetAllVolumes(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllVolumes", reflect.TypeOf((*MockFlapsClient)(nil).GetAllVolumes), ctx) +} + +// GetMany mocks base method. +func (m *MockFlapsClient) GetMany(ctx context.Context, machineIDs []string) ([]*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMany", ctx, machineIDs) + ret0, _ := ret[0].([]*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMany indicates an expected call of GetMany. +func (mr *MockFlapsClientMockRecorder) GetMany(ctx, machineIDs any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMany", reflect.TypeOf((*MockFlapsClient)(nil).GetMany), ctx, machineIDs) +} + +// GetMetadata mocks base method. +func (m *MockFlapsClient) GetMetadata(ctx context.Context, machineID string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetadata", ctx, machineID) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMetadata indicates an expected call of GetMetadata. +func (mr *MockFlapsClientMockRecorder) GetMetadata(ctx, machineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockFlapsClient)(nil).GetMetadata), ctx, machineID) +} + +// GetProcesses mocks base method. +func (m *MockFlapsClient) GetProcesses(ctx context.Context, machineID string) (fly.MachinePsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProcesses", ctx, machineID) + ret0, _ := ret[0].(fly.MachinePsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProcesses indicates an expected call of GetProcesses. +func (mr *MockFlapsClientMockRecorder) GetProcesses(ctx, machineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProcesses", reflect.TypeOf((*MockFlapsClient)(nil).GetProcesses), ctx, machineID) +} + +// GetVolume mocks base method. +func (m *MockFlapsClient) GetVolume(ctx context.Context, volumeId string) (*fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolume", ctx, volumeId) + ret0, _ := ret[0].(*fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolume indicates an expected call of GetVolume. 
+func (mr *MockFlapsClientMockRecorder) GetVolume(ctx, volumeId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolume", reflect.TypeOf((*MockFlapsClient)(nil).GetVolume), ctx, volumeId) +} + +// GetVolumeSnapshots mocks base method. +func (m *MockFlapsClient) GetVolumeSnapshots(ctx context.Context, volumeId string) ([]fly.VolumeSnapshot, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolumeSnapshots", ctx, volumeId) + ret0, _ := ret[0].([]fly.VolumeSnapshot) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolumeSnapshots indicates an expected call of GetVolumeSnapshots. +func (mr *MockFlapsClientMockRecorder) GetVolumeSnapshots(ctx, volumeId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeSnapshots", reflect.TypeOf((*MockFlapsClient)(nil).GetVolumeSnapshots), ctx, volumeId) +} + +// GetVolumes mocks base method. +func (m *MockFlapsClient) GetVolumes(ctx context.Context) ([]fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolumes", ctx) + ret0, _ := ret[0].([]fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolumes indicates an expected call of GetVolumes. +func (mr *MockFlapsClientMockRecorder) GetVolumes(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumes", reflect.TypeOf((*MockFlapsClient)(nil).GetVolumes), ctx) +} + +// Kill mocks base method. +func (m *MockFlapsClient) Kill(ctx context.Context, machineID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Kill", ctx, machineID) + ret0, _ := ret[0].(error) + return ret0 +} + +// Kill indicates an expected call of Kill. +func (mr *MockFlapsClientMockRecorder) Kill(ctx, machineID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Kill", reflect.TypeOf((*MockFlapsClient)(nil).Kill), ctx, machineID) +} + +// Launch mocks base method. +func (m *MockFlapsClient) Launch(ctx context.Context, builder fly.LaunchMachineInput) (*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Launch", ctx, builder) + ret0, _ := ret[0].(*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Launch indicates an expected call of Launch. +func (mr *MockFlapsClientMockRecorder) Launch(ctx, builder any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Launch", reflect.TypeOf((*MockFlapsClient)(nil).Launch), ctx, builder) +} + +// List mocks base method. +func (m *MockFlapsClient) List(ctx context.Context, state string) ([]*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, state) + ret0, _ := ret[0].([]*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List. +func (mr *MockFlapsClientMockRecorder) List(ctx, state any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockFlapsClient)(nil).List), ctx, state) +} + +// ListActive mocks base method. +func (m *MockFlapsClient) ListActive(ctx context.Context) ([]*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListActive", ctx) + ret0, _ := ret[0].([]*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListActive indicates an expected call of ListActive. 
+func (mr *MockFlapsClientMockRecorder) ListActive(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListActive", reflect.TypeOf((*MockFlapsClient)(nil).ListActive), ctx) +} + +// ListAppSecrets mocks base method. +func (m *MockFlapsClient) ListAppSecrets(ctx context.Context, version *uint64, showSecrets bool) ([]fly.AppSecret, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListAppSecrets", ctx, version, showSecrets) + ret0, _ := ret[0].([]fly.AppSecret) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListAppSecrets indicates an expected call of ListAppSecrets. +func (mr *MockFlapsClientMockRecorder) ListAppSecrets(ctx, version, showSecrets any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAppSecrets", reflect.TypeOf((*MockFlapsClient)(nil).ListAppSecrets), ctx, version, showSecrets) +} + +// ListFlyAppsMachines mocks base method. +func (m *MockFlapsClient) ListFlyAppsMachines(ctx context.Context) ([]*fly.Machine, *fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListFlyAppsMachines", ctx) + ret0, _ := ret[0].([]*fly.Machine) + ret1, _ := ret[1].(*fly.Machine) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListFlyAppsMachines indicates an expected call of ListFlyAppsMachines. +func (mr *MockFlapsClientMockRecorder) ListFlyAppsMachines(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListFlyAppsMachines", reflect.TypeOf((*MockFlapsClient)(nil).ListFlyAppsMachines), ctx) +} + +// ListSecretKeys mocks base method. +func (m *MockFlapsClient) ListSecretKeys(ctx context.Context, version *uint64) ([]fly.SecretKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListSecretKeys", ctx, version) + ret0, _ := ret[0].([]fly.SecretKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSecretKeys indicates an expected call of ListSecretKeys. +func (mr *MockFlapsClientMockRecorder) ListSecretKeys(ctx, version any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSecretKeys", reflect.TypeOf((*MockFlapsClient)(nil).ListSecretKeys), ctx, version) +} + +// NewRequest mocks base method. +func (m *MockFlapsClient) NewRequest(ctx context.Context, method, path string, in any, headers map[string][]string) (*http.Request, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewRequest", ctx, method, path, in, headers) + ret0, _ := ret[0].(*http.Request) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewRequest indicates an expected call of NewRequest. +func (mr *MockFlapsClientMockRecorder) NewRequest(ctx, method, path, in, headers any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRequest", reflect.TypeOf((*MockFlapsClient)(nil).NewRequest), ctx, method, path, in, headers) +} + +// RefreshLease mocks base method. +func (m *MockFlapsClient) RefreshLease(ctx context.Context, machineID string, ttl *int, nonce string) (*fly.MachineLease, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RefreshLease", ctx, machineID, ttl, nonce) + ret0, _ := ret[0].(*fly.MachineLease) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RefreshLease indicates an expected call of RefreshLease. 
+func (mr *MockFlapsClientMockRecorder) RefreshLease(ctx, machineID, ttl, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshLease", reflect.TypeOf((*MockFlapsClient)(nil).RefreshLease), ctx, machineID, ttl, nonce) +} + +// ReleaseLease mocks base method. +func (m *MockFlapsClient) ReleaseLease(ctx context.Context, machineID, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseLease", ctx, machineID, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReleaseLease indicates an expected call of ReleaseLease. +func (mr *MockFlapsClientMockRecorder) ReleaseLease(ctx, machineID, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseLease", reflect.TypeOf((*MockFlapsClient)(nil).ReleaseLease), ctx, machineID, nonce) +} + +// Restart mocks base method. +func (m *MockFlapsClient) Restart(ctx context.Context, in fly.RestartMachineInput, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Restart", ctx, in, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Restart indicates an expected call of Restart. +func (mr *MockFlapsClientMockRecorder) Restart(ctx, in, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restart", reflect.TypeOf((*MockFlapsClient)(nil).Restart), ctx, in, nonce) +} + +// SetAppSecret mocks base method. +func (m *MockFlapsClient) SetAppSecret(ctx context.Context, name, value string) (*fly.SetAppSecretResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetAppSecret", ctx, name, value) + ret0, _ := ret[0].(*fly.SetAppSecretResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetAppSecret indicates an expected call of SetAppSecret. +func (mr *MockFlapsClientMockRecorder) SetAppSecret(ctx, name, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAppSecret", reflect.TypeOf((*MockFlapsClient)(nil).SetAppSecret), ctx, name, value) +} + +// SetMetadata mocks base method. +func (m *MockFlapsClient) SetMetadata(ctx context.Context, machineID, key, value string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetMetadata", ctx, machineID, key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetMetadata indicates an expected call of SetMetadata. +func (mr *MockFlapsClientMockRecorder) SetMetadata(ctx, machineID, key, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockFlapsClient)(nil).SetMetadata), ctx, machineID, key, value) +} + +// SetSecretKey mocks base method. +func (m *MockFlapsClient) SetSecretKey(ctx context.Context, name, typ string, value []byte) (*fly.SetSecretKeyResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetSecretKey", ctx, name, typ, value) + ret0, _ := ret[0].(*fly.SetSecretKeyResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetSecretKey indicates an expected call of SetSecretKey. +func (mr *MockFlapsClientMockRecorder) SetSecretKey(ctx, name, typ, value any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSecretKey", reflect.TypeOf((*MockFlapsClient)(nil).SetSecretKey), ctx, name, typ, value) +} + +// Start mocks base method. 
+func (m *MockFlapsClient) Start(ctx context.Context, machineID, nonce string) (*fly.MachineStartResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Start", ctx, machineID, nonce) + ret0, _ := ret[0].(*fly.MachineStartResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Start indicates an expected call of Start. +func (mr *MockFlapsClientMockRecorder) Start(ctx, machineID, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockFlapsClient)(nil).Start), ctx, machineID, nonce) +} + +// Stop mocks base method. +func (m *MockFlapsClient) Stop(ctx context.Context, in fly.StopMachineInput, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stop", ctx, in, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Stop indicates an expected call of Stop. +func (mr *MockFlapsClientMockRecorder) Stop(ctx, in, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockFlapsClient)(nil).Stop), ctx, in, nonce) +} + +// Suspend mocks base method. +func (m *MockFlapsClient) Suspend(ctx context.Context, machineID, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Suspend", ctx, machineID, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Suspend indicates an expected call of Suspend. +func (mr *MockFlapsClientMockRecorder) Suspend(ctx, machineID, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Suspend", reflect.TypeOf((*MockFlapsClient)(nil).Suspend), ctx, machineID, nonce) +} + +// Uncordon mocks base method. +func (m *MockFlapsClient) Uncordon(ctx context.Context, machineID, nonce string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Uncordon", ctx, machineID, nonce) + ret0, _ := ret[0].(error) + return ret0 +} + +// Uncordon indicates an expected call of Uncordon. +func (mr *MockFlapsClientMockRecorder) Uncordon(ctx, machineID, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uncordon", reflect.TypeOf((*MockFlapsClient)(nil).Uncordon), ctx, machineID, nonce) +} + +// Update mocks base method. +func (m *MockFlapsClient) Update(ctx context.Context, builder fly.LaunchMachineInput, nonce string) (*fly.Machine, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, builder, nonce) + ret0, _ := ret[0].(*fly.Machine) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Update indicates an expected call of Update. +func (mr *MockFlapsClientMockRecorder) Update(ctx, builder, nonce any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockFlapsClient)(nil).Update), ctx, builder, nonce) +} + +// UpdateAppSecrets mocks base method. +func (m *MockFlapsClient) UpdateAppSecrets(ctx context.Context, values map[string]*string) (*fly.UpdateAppSecretsResp, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateAppSecrets", ctx, values) + ret0, _ := ret[0].(*fly.UpdateAppSecretsResp) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateAppSecrets indicates an expected call of UpdateAppSecrets. 
+func (mr *MockFlapsClientMockRecorder) UpdateAppSecrets(ctx, values any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAppSecrets", reflect.TypeOf((*MockFlapsClient)(nil).UpdateAppSecrets), ctx, values) +} + +// UpdateVolume mocks base method. +func (m *MockFlapsClient) UpdateVolume(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateVolume", ctx, volumeId, req) + ret0, _ := ret[0].(*fly.Volume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateVolume indicates an expected call of UpdateVolume. +func (mr *MockFlapsClientMockRecorder) UpdateVolume(ctx, volumeId, req any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVolume", reflect.TypeOf((*MockFlapsClient)(nil).UpdateVolume), ctx, volumeId, req) +} + +// Wait mocks base method. +func (m *MockFlapsClient) Wait(ctx context.Context, machine *fly.Machine, state string, timeout time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Wait", ctx, machine, state, timeout) + ret0, _ := ret[0].(error) + return ret0 +} + +// Wait indicates an expected call of Wait. +func (mr *MockFlapsClientMockRecorder) Wait(ctx, machine, state, timeout any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wait", reflect.TypeOf((*MockFlapsClient)(nil).Wait), ctx, machine, state, timeout) +} + +// WaitForApp mocks base method. +func (m *MockFlapsClient) WaitForApp(ctx context.Context, name string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForApp", ctx, name) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForApp indicates an expected call of WaitForApp. 
+func (mr *MockFlapsClientMockRecorder) WaitForApp(ctx, name any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForApp", reflect.TypeOf((*MockFlapsClient)(nil).WaitForApp), ctx, name) +} diff --git a/internal/build/imgsrc/nixpacks_builder.go b/internal/build/imgsrc/nixpacks_builder.go index 23930d35ae..0d640f7b0c 100644 --- a/internal/build/imgsrc/nixpacks_builder.go +++ b/internal/build/imgsrc/nixpacks_builder.go @@ -22,7 +22,9 @@ import ( const nixpackInstallerURL string = "https://raw.githubusercontent.com/railwayapp/nixpacks/master/install.sh" -type nixpacksBuilder struct{} +type nixpacksBuilder struct { + provisioner *Provisioner +} func (*nixpacksBuilder) Name() string { return "Nixpacks" @@ -97,7 +99,7 @@ func ensureNixpacksBinary(ctx context.Context, streams *iostreams.IOStreams) err return err } -func (*nixpacksBuilder) Run(ctx context.Context, dockerFactory *dockerClientFactory, streams *iostreams.IOStreams, opts ImageOptions, build *build) (*DeploymentImage, string, error) { +func (b *nixpacksBuilder) Run(ctx context.Context, dockerFactory *dockerClientFactory, streams *iostreams.IOStreams, opts ImageOptions, build *build) (*DeploymentImage, string, error) { build.BuildStart() if !dockerFactory.mode.IsAvailable() { note := "docker daemon not available, skipping" @@ -130,13 +132,10 @@ func (*nixpacksBuilder) Run(ctx context.Context, dockerFactory *dockerClientFact return nil, "", err } - machine, app, err := remoteBuilderMachine(ctx, dockerFactory.apiClient, dockerFactory.appName, false) + machine, app, err := b.provisioner.EnsureBuilder(ctx, os.Getenv("FLY_REMOTE_BUILDER_REGION"), false) if err != nil { - build.BuilderInitFinish() - build.BuildFinish() return nil, "", err } - remoteHost := machine.PrivateIP if remoteHost == "" { diff --git a/internal/build/imgsrc/remote_image_resolver.go b/internal/build/imgsrc/remote_image_resolver.go index 3c146e34cb..3e7cf11117 100644 --- a/internal/build/imgsrc/remote_image_resolver.go +++ b/internal/build/imgsrc/remote_image_resolver.go @@ -5,14 +5,18 @@ import ( "fmt" "strconv" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/tracing" "github.com/superfly/flyctl/iostreams" "go.opentelemetry.io/otel/trace" ) +type flyClient interface { + ResolveImageForApp(ctx context.Context, appName, imageRef string) (*fly.Image, error) +} + type remoteImageResolver struct { - flyApi flyutil.Client + flyApi flyClient } func (*remoteImageResolver) Name() string { @@ -46,9 +50,10 @@ func (s *remoteImageResolver) Run(ctx context.Context, _ *dockerClientFactory, s } di := &DeploymentImage{ - ID: img.ID, - Tag: img.Ref, - Size: int64(size), + ID: img.ID, + Tag: img.Ref, + Digest: img.Digest, + Size: int64(size), } span.SetAttributes(di.ToSpanAttributes()...) 
diff --git a/internal/build/imgsrc/resolver.go b/internal/build/imgsrc/resolver.go index 052661569b..2f031a1b38 100644 --- a/internal/build/imgsrc/resolver.go +++ b/internal/build/imgsrc/resolver.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/cenkalti/backoff/v5" "github.com/pkg/errors" "github.com/vektah/gqlparser/v2/gqlerror" "go.opentelemetry.io/otel/attribute" @@ -27,7 +28,6 @@ import ( "github.com/superfly/flyctl/internal/sentry" "github.com/superfly/flyctl/internal/tracing" "github.com/superfly/flyctl/iostreams" - "github.com/superfly/flyctl/retry" "github.com/superfly/flyctl/terminal" ) @@ -54,7 +54,8 @@ type ImageOptions struct { BuildpacksDockerHost string BuildpacksVolumes []string UseOverlaybd bool - UseZstd bool + Compression string + CompressionLevel int } func (io ImageOptions) ToSpanAttributes() []attribute.KeyValue { @@ -73,7 +74,8 @@ func (io ImageOptions) ToSpanAttributes() []attribute.KeyValue { attribute.String("imageoptions.buildpacks_docker_host", io.BuildpacksDockerHost), attribute.StringSlice("imageoptions.buildpacks", io.Buildpacks), attribute.StringSlice("imageoptions.buildpacks_volumes", io.BuildpacksVolumes), - attribute.Bool("imageoptions.use_zstd", io.UseZstd), + attribute.String("imageoptions.compression", io.Compression), + attribute.Int("imageoptions.compressionLevel", io.CompressionLevel), } if io.BuildArgs != nil { @@ -118,14 +120,23 @@ func (ro RefOptions) ToSpanAttributes() []attribute.KeyValue { } type DeploymentImage struct { - ID string - Tag string - Size int64 - BuildID string - Labels map[string]string + ID string + Tag string + Digest string + Size int64 + BuildID string + BuilderID string + Labels map[string]string } -func (di DeploymentImage) ToSpanAttributes() []attribute.KeyValue { +func (di *DeploymentImage) String() string { + if di.Digest == "" { + return di.Tag + } + return fmt.Sprintf("%s@%s", di.Tag, di.Digest) +} + +func (di *DeploymentImage) ToSpanAttributes() []attribute.KeyValue { attrs := []attribute.KeyValue{ attribute.String("image.id", di.ID), attribute.String("image.tag", di.Tag), @@ -141,9 +152,16 @@ func (di DeploymentImage) ToSpanAttributes() []attribute.KeyValue { } type Resolver struct { + // appName is the name of the app that the resolver is going to build. + appName string + apiClient flyutil.Client + heartbeatFn func(ctx context.Context, client *dockerclient.Client, req *http.Request) error + recreateBuilder bool + // provisioner is responsible for provisioning a builder machine remotely. + provisioner *Provisioner + // dockerFactory is a factory for creating docker clients. + // Some strategies don't need it, but it won't be nil. dockerFactory *dockerClientFactory - apiClient flyutil.Client - heartbeatFn func(ctx context.Context, client *dockerclient.Client, req *http.Request) error } type StopSignal struct { @@ -235,12 +253,17 @@ func (r *Resolver) BuildImage(ctx context.Context, streams *iostreams.IOStreams, builderScope = DepotBuilderScopeApp default: return nil, fmt.Errorf("invalid depot-scope value. 
must be 'org' or 'app'") - } - if r.dockerFactory.mode.UseNixpacks() { - strategies = append(strategies, &nixpacksBuilder{}) - } else if r.dockerFactory.mode.UseDepot() && len(opts.Buildpacks) == 0 && opts.Builder == "" && opts.BuiltIn == "" { + if r.provisioner.UseBuildkit() { + strategies = append(strategies, NewBuildkitBuilder(flag.GetBuildkitAddr(ctx), r.provisioner)) + } else if r.dockerFactory.mode.UseNixpacks() { + org, err := r.apiClient.GetOrganizationByApp(ctx, opts.AppName) + if err != nil { + return nil, err + } + strategies = append(strategies, &nixpacksBuilder{provisioner: NewProvisioner(org)}) + } else if (r.dockerFactory.mode.UseDepot() && !r.dockerFactory.mode.UseManagedBuilder()) && len(opts.Buildpacks) == 0 && opts.Builder == "" && opts.BuiltIn == "" { strategies = append(strategies, &DepotBuilder{Scope: builderScope}) } else { strategies = []imageBuilder{ @@ -282,6 +305,7 @@ func (r *Resolver) BuildImage(ctx context.Context, streams *iostreams.IOStreams, // we should only set the image's buildID if we push the build info to web img.BuildID = buildResult.BuildId } + img.BuilderID = bld.BuilderMeta.RemoteMachineId return img, nil } @@ -347,7 +371,7 @@ func (r *Resolver) createBuildGql(ctx context.Context, strategiesAvailable []str } input := fly.CreateBuildInput{ - AppName: r.dockerFactory.appName, + AppName: r.appName, BuilderType: builderType, ImageOpts: *imageOpts, MachineId: "", @@ -549,7 +573,7 @@ func (r *Resolver) finishBuild(ctx context.Context, build *build, failed bool, l } input := fly.FinishBuildInput{ BuildId: build.BuildId, - AppName: r.dockerFactory.appName, + AppName: r.appName, MachineId: "", Status: status, Logs: limitLogs(logs), @@ -642,8 +666,8 @@ func (r *Resolver) StartHeartbeat(ctx context.Context) (*StopSignal, error) { ctx, span := tracing.GetTracer().Start(ctx, "start_heartbeat") defer span.End() - if !r.dockerFactory.remote || r.dockerFactory.mode.UseDepot() { - span.AddEvent("won't check heartbeart of non-remote build") + if !r.dockerFactory.remote || r.dockerFactory.mode.UseDepot() || r.provisioner.UseBuildkit() { + span.AddEvent("won't check heartbeat of non-remote build") return nil, nil } @@ -675,9 +699,9 @@ func (r *Resolver) StartHeartbeat(ctx context.Context) (*StopSignal, error) { terminal.Debugf("Sending remote builder heartbeat pulse to %s...\n", heartbeatUrl) span.AddEvent("sending first heartbeat") - err = retry.Retry(ctx, func() error { - return r.heartbeatFn(ctx, dockerClient, heartbeatReq) - }, 3) + _, err = backoff.Retry(ctx, func() (any, error) { + return nil, r.heartbeatFn(ctx, dockerClient, heartbeatReq) + }, backoff.WithMaxTries(3)) if err != nil { var h *httpError if errors.As(err, &h) { @@ -759,14 +783,32 @@ func (s *StopSignal) Stop() { }) } -func NewResolver(daemonType DockerDaemonType, apiClient flyutil.Client, appName string, iostreams *iostreams.IOStreams, connectOverWireguard, recreateBuilder bool) *Resolver { - return &Resolver{ - dockerFactory: newDockerClientFactory(daemonType, apiClient, appName, iostreams, connectOverWireguard, recreateBuilder), - apiClient: apiClient, - heartbeatFn: heartbeat, +func WithProvisioner(provisioner *Provisioner) func(resolver *Resolver) { + return func(resolver *Resolver) { + resolver.provisioner = provisioner } } +func NewResolver( + daemonType DockerDaemonType, apiClient flyutil.Client, appName string, iostreams *iostreams.IOStreams, + connectOverWireguard, recreateBuilder bool, + opts ...func(resolver *Resolver), +) *Resolver { + resolver := &Resolver{ + appName: appName, + 
apiClient: apiClient, + heartbeatFn: heartbeat, + recreateBuilder: recreateBuilder, + } + + for _, opt := range opts { + opt(resolver) + } + + resolver.dockerFactory = newDockerClientFactory(daemonType, apiClient, appName, iostreams, connectOverWireguard, recreateBuilder) + return resolver +} + type imageBuilder interface { Name() string Run(ctx context.Context, dockerFactory *dockerClientFactory, streams *iostreams.IOStreams, opts ImageOptions, build *build) (*DeploymentImage, string, error) diff --git a/internal/build/imgsrc/resolver_test.go b/internal/build/imgsrc/resolver_test.go index d01312d6a7..0440d484bd 100644 --- a/internal/build/imgsrc/resolver_test.go +++ b/internal/build/imgsrc/resolver_test.go @@ -12,6 +12,19 @@ import ( "github.com/superfly/flyctl/internal/config" ) +func TestDeploymentImage(t *testing.T) { + image := &DeploymentImage{ + ID: "img_8rlxp2nzn32np3jq", + Tag: "docker-hub-mirror.fly.io/flyio/postgres-flex:16", + Digest: "sha256:f107dbfaa732063b31ee94aa728c4f5648a672259fd62bfaa245f9b7a53b5479", + Size: 123, + } + assert.Equal(t, "docker-hub-mirror.fly.io/flyio/postgres-flex:16@sha256:f107dbfaa732063b31ee94aa728c4f5648a672259fd62bfaa245f9b7a53b5479", image.String()) + + image.Digest = "" + assert.Equal(t, "docker-hub-mirror.fly.io/flyio/postgres-flex:16", image.String()) +} + func TestHeartbeat(t *testing.T) { dc, err := client.NewClientWithOpts() assert.NoError(t, err) @@ -46,6 +59,7 @@ func TestStartHeartbeat(t *testing.T) { heartbeatFn: func(ctx context.Context, client *client.Client, req *http.Request) error { return nil }, + provisioner: &Provisioner{}, } _, err = resolver.StartHeartbeat(ctx) @@ -80,6 +94,7 @@ func TestStartHeartbeatFirstRetry(t *testing.T) { } return nil }, + provisioner: &Provisioner{}, } _, err = resolver.StartHeartbeat(ctx) @@ -110,6 +125,7 @@ func TestStartHeartbeatNoEndpoint(t *testing.T) { StatusCode: http.StatusNotFound, } }, + provisioner: &Provisioner{}, } _, err = resolver.StartHeartbeat(ctx) @@ -140,6 +156,7 @@ func TestStartHeartbeatWError(t *testing.T) { StatusCode: http.StatusBadRequest, } }, + provisioner: &Provisioner{}, } _, err = resolver.StartHeartbeat(ctx) diff --git a/internal/buildinfo/buildinfo.go b/internal/buildinfo/buildinfo.go index 9665921d6c..0c6669ba38 100644 --- a/internal/buildinfo/buildinfo.go +++ b/internal/buildinfo/buildinfo.go @@ -116,8 +116,8 @@ func BuildTime() time.Time { func Commit() string { info, _ := debug.ReadBuildInfo() - var rev string = "" - var dirty string = "" + var rev = "" + var dirty = "" for _, v := range info.Settings { if v.Key == "vcs.revision" { rev = v.Value diff --git a/internal/certificate/errors.go b/internal/certificate/errors.go new file mode 100644 index 0000000000..bfaba702fe --- /dev/null +++ b/internal/certificate/errors.go @@ -0,0 +1,29 @@ +package certificate + +import ( + "fmt" + + "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/format" + "github.com/superfly/flyctl/iostreams" +) + +// DisplayValidationErrors shows certificate validation errors in a user-friendly format +func DisplayValidationErrors(io *iostreams.IOStreams, errors []fly.AppCertificateValidationError) { + if len(errors) == 0 { + return + } + + cs := io.ColorScheme() + + fmt.Fprintf(io.Out, "\n%s\n", cs.Yellow("Certificate validation issues:")) + + for _, err := range errors { + fmt.Fprintf(io.Out, "\n %s\n", err.Message) + if err.Remediation != "" { + fmt.Fprintf(io.Out, " %s %s\n", cs.Bold("Fix:"), err.Remediation) + } + fmt.Fprintf(io.Out, " %s\n", + cs.Gray("Checked 
"+format.RelativeTime(err.Timestamp))) + } +} diff --git a/internal/cli/cli.go b/internal/cli/cli.go index 2132a61e49..2c752c94e4 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -67,10 +67,11 @@ func Run(ctx context.Context, io *iostreams.IOStreams, args ...string) int { cmd.SetErr(io.ErrOut) // Special case for the launch command, support `flyctl launch args -- [subargs]` + // and `flyctl mcp wrap --mcp script -- [subargs]` // Where the arguments after `--` are passed to the scanner/dockerfile generator. // This isn't supported natively by cobra, so we have to manually split the args // See: https://github.com/spf13/cobra/issues/739 - if len(args) > 0 && args[0] == "launch" { + if (len(args) > 0 && args[0] == "launch") || (len(args) > 2 && args[0] == "mcp" && args[1] == "wrap") { index := slices.Index(args, "--") if index >= 0 { ctx = flag.WithExtraArgs(ctx, args[index+1:]) diff --git a/internal/cmdutil/preparers/preparers.go b/internal/cmdutil/preparers/preparers.go index bdf744567c..3e7ac8986e 100644 --- a/internal/cmdutil/preparers/preparers.go +++ b/internal/cmdutil/preparers/preparers.go @@ -132,7 +132,7 @@ func ApplyAliases(ctx context.Context) (context.Context, error) { errorMessages = append(errorMessages, fmt.Sprintf("flags '%v' have different types", invalidTypes)) } if len(errorMessages) > 1 { - err = fmt.Errorf("multiple errors occured:\n > %s\n", strings.Join(errorMessages, "\n > ")) + err = fmt.Errorf("multiple errors occurred:\n > %s\n", strings.Join(errorMessages, "\n > ")) } else if len(errorMessages) == 1 { err = fmt.Errorf("%s", errorMessages[0]) } diff --git a/internal/command/apps/apps.go b/internal/command/apps/apps.go index b8ed6432c1..41c03093ae 100644 --- a/internal/command/apps/apps.go +++ b/internal/command/apps/apps.go @@ -53,7 +53,7 @@ func BuildContext(ctx context.Context, app *fly.AppCompact) (context.Context, er return nil, fmt.Errorf("can't establish agent %w", err) } - dialer, err := agentclient.Dialer(ctx, app.Organization.Slug, "") + dialer, err := agentclient.Dialer(ctx, app.Organization.Slug, app.Network) if err != nil { return nil, fmt.Errorf("can't build tunnel for %s: %s", app.Organization.Slug, err) } diff --git a/internal/command/apps/create.go b/internal/command/apps/create.go index 76f9ca188b..d3d703084f 100644 --- a/internal/command/apps/create.go +++ b/internal/command/apps/create.go @@ -10,6 +10,7 @@ import ( "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/iostreams" + "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" @@ -17,6 +18,7 @@ import ( "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/internal/state" ) func newCreate() (cmd *cobra.Command) { @@ -54,6 +56,11 @@ fetch one with 'fly config save -a '.` Description: "Use the machines platform", Hidden: true, }, + flag.Yes(), + flag.Bool{ + Name: "save", + Description: "Save the app name to the config file", + }, flag.Org(), ) @@ -123,5 +130,34 @@ func RunCreate(ctx context.Context) (err error) { } fmt.Fprintf(io.Out, "New app created: %s\n", app.Name) + + if flag.GetBool(ctx, "save") { + path := state.WorkingDirectory(ctx) + configfilename, err := appconfig.ResolveConfigFileFromPath(path) + if err != nil { + return err + } + + if exists, _ := appconfig.ConfigFileExistsAtPath(configfilename); exists && 
!flag.GetBool(ctx, "yes") { + confirmation, err := prompt.Confirmf(ctx, + "An existing configuration file has been found\nOverwrite file '%s'", configfilename) + if err != nil { + return err + } + if !confirmation { + return nil + } + } + + cfg := appconfig.Config{ + AppName: app.Name, + } + + err = cfg.WriteToDisk(ctx, configfilename) + if err != nil { + return fmt.Errorf("failed to save app name to config file: %w", err) + } + } + return nil } diff --git a/internal/command/apps/destroy.go b/internal/command/apps/destroy.go index 5b99d4e90f..fd7bfc713f 100644 --- a/internal/command/apps/destroy.go +++ b/internal/command/apps/destroy.go @@ -12,6 +12,7 @@ import ( "github.com/superfly/flyctl/iostreams" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/prompt" @@ -84,7 +85,7 @@ func RunDestroy(ctx context.Context) error { } if bucket != nil { - _, err = gql.DeleteAddOn(ctx, client.GenqClient(), bucket.Name) + _, err = gql.DeleteAddOn(ctx, client.GenqClient(), bucket.Name, string(gql.AddOnTypeTigris)) if err != nil { return err } @@ -95,6 +96,8 @@ func RunDestroy(ctx context.Context) error { return err } + _ = appsecrets.DeleteMinvers(ctx, appName) + fmt.Fprintf(io.Out, "Destroyed app %s\n", appName) } diff --git a/internal/command/apps/move.go b/internal/command/apps/move.go index 6c23555238..f0444ef76d 100644 --- a/internal/command/apps/move.go +++ b/internal/command/apps/move.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command/deploy/statics" "github.com/superfly/flyctl/internal/flag/completion" "github.com/superfly/flyctl/internal/flyutil" @@ -134,12 +135,18 @@ func runMoveAppOnMachines(ctx context.Context, app *fly.App, targetOrg *fly.Orga } } + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return err + } + for _, machine := range machines { input := &fly.LaunchMachineInput{ - Name: machine.Name, - Region: machine.Region, - Config: machine.Config, - SkipHealthChecks: skipHealthChecks, + Name: machine.Name, + Region: machine.Region, + Config: machine.Config, + SkipHealthChecks: skipHealthChecks, + MinSecretsVersion: minvers, } mach.Update(ctx, machine, input) } diff --git a/internal/command/auth/docker.go b/internal/command/auth/docker.go index 4e8380b1a9..f9f55e20a7 100644 --- a/internal/command/auth/docker.go +++ b/internal/command/auth/docker.go @@ -22,10 +22,11 @@ import ( func newDocker() *cobra.Command { const ( - long = `Adds registry.fly.io to the docker daemon's authenticated -registries. This allows you to push images directly to fly from -the docker cli. -` + long = `Adds registry.fly.io to the Docker daemon's authenticated +registries. This allows you to push images directly to Fly.io from +the Docker CLI. 
+ +Note: Tokens generated by this command expire after 5 minutes.` short = "Authenticate docker" ) diff --git a/internal/command/auth/webauth/webauth.go b/internal/command/auth/webauth/webauth.go index 03f45e0eec..cfb70fc0ac 100644 --- a/internal/command/auth/webauth/webauth.go +++ b/internal/command/auth/webauth/webauth.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "os" "time" "github.com/azazeal/pause" @@ -45,7 +46,33 @@ func SaveToken(ctx context.Context, token string) error { } func RunWebLogin(ctx context.Context, signup bool) (string, error) { - auth, err := fly.StartCLISessionWebAuth(state.Hostname(ctx), signup) + args := map[string]interface{}{ + "signup": signup, + "target": "auth", + } + + var ( + lockOrg = os.Getenv("FLY_TOKEN_LOCK_ORG") + lockApp = os.Getenv("FLY_TOKEN_LOCK_APP") + lockInstance = os.Getenv("FLY_TOKEN_LOCK_INSTANCE") + metadata map[string]interface{} + ) + + if lockOrg != "" || lockApp != "" || lockInstance != "" { + metadata = map[string]interface{}{} + args["metadata"] = metadata + } + if lockOrg != "" { + metadata["lock_organization"] = lockOrg + } + if lockApp != "" { + metadata["lock_app"] = lockApp + } + if lockInstance != "" { + metadata["lock_instance"] = lockInstance + } + + auth, err := fly.StartCLISession(state.Hostname(ctx), args) if err != nil { return "", err } diff --git a/internal/command/certificates/root.go b/internal/command/certificates/root.go index 4dafcb0c30..0b6f3b893b 100644 --- a/internal/command/certificates/root.go +++ b/internal/command/certificates/root.go @@ -9,6 +9,7 @@ import ( "github.com/dustin/go-humanize" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/certificate" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" @@ -36,6 +37,7 @@ certificates issued for the hostname/domain by Let's Encrypt.` newCertificatesRemove(), newCertificatesShow(), newCertificatesCheck(), + newCertificatesSetup(), ) return cmd } @@ -136,6 +138,25 @@ Displays results in the same format as the SHOW command.` return cmd } +func newCertificatesSetup() *cobra.Command { + const ( + short = "Shows certificate setup instructions" + long = `Shows setup instructions for configuring DNS records for a certificate. 
+Takes hostname as a parameter to show the setup instructions for that certificate.`
+	)
+	cmd := command.New("setup <hostname>", short, long, runCertificatesSetup,
+		command.RequireSession,
+		command.RequireAppName,
+	)
+	flag.Add(cmd,
+		flag.App(),
+		flag.AppConfig(),
+		flag.JSONOutput(),
+	)
+	cmd.Args = cobra.ExactArgs(1)
+	return cmd
+}
+
 func runCertificatesList(ctx context.Context) error {
 	appName := appconfig.NameFromContext(ctx)
 	apiClient := flyutil.ClientFromContext(ctx)
@@ -160,11 +181,17 @@ func runCertificatesShow(ctx context.Context) error {
 
 	printCertificate(ctx, cert)
 
+	// Display validation errors if any exist
+	if len(cert.ValidationErrors) > 0 {
+		io := iostreams.FromContext(ctx)
+		certificate.DisplayValidationErrors(io, cert.ValidationErrors)
+	}
+
 	if cert.ClientStatus == "Ready" {
 		return nil
 	}
 
-	return reportNextStepCert(ctx, hostname, cert, hostcheck)
+	return reportNextStepCert(ctx, hostname, cert, hostcheck, DNSDisplaySkip)
 }
 
 func runCertificatesCheck(ctx context.Context) error {
@@ -179,11 +206,17 @@ func runCertificatesCheck(ctx context.Context) error {
 
 	printCertificate(ctx, cert)
 
+	// Display validation errors if any exist
+	if len(cert.ValidationErrors) > 0 {
+		io := iostreams.FromContext(ctx)
+		certificate.DisplayValidationErrors(io, cert.ValidationErrors)
+	}
+
 	if cert.ClientStatus == "Ready" {
 		return nil
 	}
 
-	return reportNextStepCert(ctx, hostname, cert, hostcheck)
+	return reportNextStepCert(ctx, hostname, cert, hostcheck, DNSDisplaySkip)
 }
 
 func runCertificatesAdd(ctx context.Context) error {
@@ -196,7 +229,7 @@ func runCertificatesAdd(ctx context.Context) error {
 		return err
 	}
 
-	return reportNextStepCert(ctx, hostname, cert, hostcheck)
+	return reportNextStepCert(ctx, hostname, cert, hostcheck, DNSDisplayForce)
 }
 
 func runCertificatesRemove(ctx context.Context) error {
@@ -232,7 +265,28 @@ func runCertificatesRemove(ctx context.Context) error {
 	return nil
 }
 
-func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCertificate, hostcheck *fly.HostnameCheck) error {
+func runCertificatesSetup(ctx context.Context) error {
+	apiClient := flyutil.ClientFromContext(ctx)
+	appName := appconfig.NameFromContext(ctx)
+	hostname := flag.FirstArg(ctx)
+
+	cert, hostcheck, err := apiClient.CheckAppCertificate(ctx, appName, hostname)
+	if err != nil {
+		return err
+	}
+
+	return reportNextStepCert(ctx, hostname, cert, hostcheck, DNSDisplayForce)
+}
+
+type DNSDisplayMode int
+
+const (
+	DNSDisplayAuto  DNSDisplayMode = iota // Show setup steps if required
+	DNSDisplayForce                       // Always show setup steps
+	DNSDisplaySkip                        // Never show setup steps
+)
+
+func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCertificate, hostcheck *fly.HostnameCheck, dnsMode DNSDisplayMode) error {
 	io := iostreams.FromContext(ctx)
 
 	// print a blank line, easier to read!
@@ -241,7 +295,6 @@ func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCerti colorize := io.ColorScheme() appName := appconfig.NameFromContext(ctx) apiClient := flyutil.ClientFromContext(ctx) - alternateHostname := getAlternateHostname(hostname) // These are the IPs we have for the app ips, err := apiClient.GetIPAddresses(ctx, appName) @@ -249,16 +302,23 @@ func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCerti return err } + cnameTarget, err := apiClient.GetAppCNAMETarget(ctx, appName) + if err != nil { + return err + } + var ipV4 fly.IPAddress var ipV6 fly.IPAddress var configuredipV4 bool var configuredipV6 bool + var externalProxyHint bool // Extract the v4 and v6 addresses we have allocated for _, x := range ips { - if x.Type == "v4" || x.Type == "shared_v4" { + switch x.Type { + case "v4", "shared_v4": ipV4 = x - } else if x.Type == "v6" { + case "v6": ipV6 = x } } @@ -268,11 +328,11 @@ func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCerti // Let's check the first A record against our recorded addresses ip := net.ParseIP(hostcheck.ARecords[0]) if !ip.Equal(net.ParseIP(ipV4.Address)) { - if isCloudflareProxied(cert.DNSProvider, ip) { - return printCloudflareInstructions(ctx, hostname, cert) + if isExternalProxied(cert.DNSProvider, ip) { + externalProxyHint = true + } else { + fmt.Fprintf(io.Out, colorize.Yellow("A Record (%s) does not match app's IP (%s)\n"), hostcheck.ARecords[0], ipV4.Address) } - - fmt.Fprintf(io.Out, colorize.Yellow("A Record (%s) does not match app's IP (%s)\n"), hostcheck.ARecords[0], ipV4.Address) } else { configuredipV4 = true } @@ -282,10 +342,11 @@ func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCerti // Let's check the first A record against our recorded addresses ip := net.ParseIP(hostcheck.AAAARecords[0]) if !ip.Equal(net.ParseIP(ipV6.Address)) { - if isCloudflareProxied(cert.DNSProvider, ip) { - return printCloudflareInstructions(ctx, hostname, cert) + if isExternalProxied(cert.DNSProvider, ip) { + externalProxyHint = true + } else { + fmt.Fprintf(io.Out, colorize.Yellow("AAAA Record (%s) does not match app's IP (%s)\n"), hostcheck.AAAARecords[0], ipV6.Address) } - fmt.Fprintf(io.Out, colorize.Yellow("AAAA Record (%s) does not match app's IP (%s)\n"), hostcheck.AAAARecords[0], ipV6.Address) } else { configuredipV6 = true } @@ -299,132 +360,193 @@ func reportNextStepCert(ctx context.Context, hostname string, cert *fly.AppCerti } else if ip.Equal(net.ParseIP(ipV6.Address)) { configuredipV6 = true } else { - if isCloudflareProxied(cert.DNSProvider, ip) { - return printCloudflareInstructions(ctx, hostname, cert) + if isExternalProxied(cert.DNSProvider, ip) { + externalProxyHint = true + } else { + fmt.Fprintf(io.Out, colorize.Yellow("Address resolution (%s) does not match app's IP (%s/%s)\n"), address, ipV4.Address, ipV6.Address) } - fmt.Fprintf(io.Out, colorize.Yellow("Address resolution (%s) does not match app's IP (%s/%s)\n"), address, ipV4.Address, ipV6.Address) } } } - if cert.IsApex { - // If this is an apex domain we should guide towards creating A and AAAA records - addArecord := !configuredipV4 - addAAAArecord := !cert.AcmeALPNConfigured - - if addArecord || addAAAArecord { - stepcnt := 1 - fmt.Fprintf(io.Out, "You are creating a certificate for %s\n", colorize.Bold(hostname)) - fmt.Fprintf(io.Out, "We are using %s for this certificate.\n\n", cert.CertificateAuthority) - if addArecord { - fmt.Fprintf(io.Out, "You can direct traffic to %s by:\n\n", 
hostname) - fmt.Fprintf(io.Out, "%d: Adding an A record to your DNS service which reads\n", stepcnt) - fmt.Fprintf(io.Out, "\n A @ %s\n\n", ipV4.Address) - stepcnt = stepcnt + 1 - } - if addAAAArecord { - fmt.Fprintf(io.Out, "You can validate your ownership of %s by:\n\n", hostname) - fmt.Fprintf(io.Out, "%d: Adding an AAAA record to your DNS service which reads:\n\n", stepcnt) - fmt.Fprintf(io.Out, " AAAA @ %s\n\n", ipV6.Address) - // stepcnt = stepcnt + 1 Uncomment if more steps - } - } else { - if cert.ClientStatus == "Ready" { - fmt.Fprintf(io.Out, "Your certificate for %s has been issued, make sure you create another certificate for %s \n", hostname, alternateHostname) - } else { - fmt.Fprintf(io.Out, "Your certificate for %s is being issued. Status is %s. Make sure to create another certificate for %s when the current certificate is issued. \n", hostname, cert.ClientStatus, alternateHostname) - } - } - } else if cert.IsWildcard { - // If this is an wildcard domain we should guide towards satisfying a DNS-01 challenge - addArecord := !configuredipV4 - addCNAMErecord := !cert.AcmeDNSConfigured - - stepcnt := 1 - fmt.Fprintf(io.Out, "You are creating a wildcard certificate for %s\n", hostname) - fmt.Fprintf(io.Out, "We are using %s for this certificate.\n\n", cert.CertificateAuthority) - if addArecord { - fmt.Fprintf(io.Out, "You can direct traffic to %s by:\n\n", hostname) - fmt.Fprintf(io.Out, "%d: Adding an A record to your DNS service which reads\n", stepcnt) - stepcnt = stepcnt + 1 - fmt.Fprintf(io.Out, "\n A @ %s\n\n", ipV4.Address) - } - - if addCNAMErecord { - printDNSValidationInstructions(ctx, stepcnt, hostname, cert) - // stepcnt = stepcnt + 1 Uncomment if more steps - } - } else { - // This is not an apex domain - // If A and AAAA record is not configured offer CNAME - + var addDNSConfig bool + switch { + case cert.IsApex: + addDNSConfig = !configuredipV4 || !configuredipV6 + case cert.IsWildcard: + addDNSConfig = !configuredipV4 || !cert.AcmeDNSConfigured + default: nothingConfigured := !(configuredipV4 && configuredipV6) onlyV4Configured := configuredipV4 && !configuredipV6 + addDNSConfig = nothingConfigured || onlyV4Configured + } - if nothingConfigured || onlyV4Configured { - fmt.Fprintf(io.Out, "You are creating a certificate for %s\n", hostname) - fmt.Fprintf(io.Out, "We are using %s for this certificate.\n\n", readableCertAuthority(cert.CertificateAuthority)) + switch { + case dnsMode == DNSDisplaySkip && addDNSConfig: + fmt.Fprintln(io.Out, "Your DNS is not yet configured correctly.") + fmt.Fprintf(io.Out, "Run %s to view DNS setup instructions.\n", colorize.Bold("fly certs setup "+hostname)) + case dnsMode == DNSDisplayForce || (dnsMode == DNSDisplayAuto && addDNSConfig): + printDNSSetupOptions(DNSSetupFlags{ + Context: ctx, + Hostname: hostname, + Certificate: cert, + IPv4Address: ipV4, + IPv6Address: ipV6, + CNAMETarget: cnameTarget, + ExternalProxyDetected: externalProxyHint, + }) + case cert.ClientStatus == "Ready": + fmt.Fprintf(io.Out, "Your certificate for %s has been issued. \n", hostname) + default: + fmt.Fprintf(io.Out, "Your certificate for %s is being issued. Status is %s. \n", hostname, cert.ClientStatus) + } - if nothingConfigured { - fmt.Fprintf(io.Out, "You can configure your DNS for %s by:\n\n", hostname) + if dnsMode != DNSDisplaySkip && !cert.IsWildcard && needsAlternateHostname(hostname) { + alternateHostname := getAlternateHostname(hostname) + fmt.Fprintf(io.Out, "Make sure to create another certificate for %s. 
\n", alternateHostname) + } + + return nil +} - eTLD, _ := publicsuffix.EffectiveTLDPlusOne(hostname) - subdomainname := strings.TrimSuffix(hostname, eTLD) - fmt.Fprintf(io.Out, "1: Adding an CNAME record to your DNS service which reads:\n") - fmt.Fprintf(io.Out, "\n CNAME %s %s.fly.dev\n", subdomainname, appName) - } else if onlyV4Configured { - printDNSValidationInstructions(ctx, 1, hostname, cert) +func isExternalProxied(provider string, ip net.IP) bool { + if provider == CLOUDFLARE { + for _, ipnet := range CloudflareIPs { + if ipnet.Contains(ip) { + return true } - } else { - if cert.ClientStatus == "Ready" { - fmt.Fprintf(io.Out, "Your certificate for %s has been issued, make sure you create another certificate for %s \n", hostname, alternateHostname) - } else { - fmt.Fprintf(io.Out, "Your certificate for %s is being issued. Status is %s. Make sure to create another certificate for %s when the current certificate is issued. \n", hostname, cert.ClientStatus, alternateHostname) + } + } else { + for _, ipnet := range FastlyIPs { + if ipnet.Contains(ip) { + return true } } } - return nil + return false } -func printDNSValidationInstructions(ctx context.Context, stepcnt int, hostname string, cert *fly.AppCertificate) { - io := iostreams.FromContext(ctx) - - fmt.Fprintf(io.Out, "You can validate your ownership of %s by:\n\n", hostname) - - fmt.Fprintf(io.Out, "%d: Adding an CNAME record to your DNS service which reads:\n", stepcnt) - fmt.Fprintf(io.Out, " %s\n", cert.DNSValidationInstructions) +type DNSSetupFlags struct { + Context context.Context + Hostname string + Certificate *fly.AppCertificate + IPv4Address fly.IPAddress + IPv6Address fly.IPAddress + CNAMETarget string + ExternalProxyDetected bool } -func isCloudflareProxied(provider string, ip net.IP) bool { - if provider != CLOUDFLARE { - return false +func printDNSSetupOptions(opts DNSSetupFlags) error { + io := iostreams.FromContext(opts.Context) + colorize := io.ColorScheme() + hasIPv4 := opts.IPv4Address.Address != "" + hasIPv6 := opts.IPv6Address.Address != "" + promoteExtProxy := opts.ExternalProxyDetected && !opts.Certificate.IsWildcard + + fmt.Fprintf(io.Out, "You are creating a certificate for %s\n", colorize.Bold(opts.Hostname)) + fmt.Fprintf(io.Out, "We are using %s for this certificate.\n\n", readableCertAuthority(opts.Certificate.CertificateAuthority)) + + if promoteExtProxy { + fmt.Fprintln(io.Out, colorize.Blue("It looks like your hostname currently resolves to a proxy or CDN.")) + fmt.Fprintln(io.Out, "If you are planning to use a proxy or CDN in front of your Fly application,") + fmt.Fprintf(io.Out, "using the %s will ensure Fly can generate a certificate automatically.\n", colorize.Green("external proxy setup")) + fmt.Fprintln(io.Out) } - for _, ipnet := range CloudflareIPs { - if ipnet.Contains(ip) { - return true + + fmt.Fprintln(io.Out, "You can direct traffic to your Fly application by adding records to your DNS provider.") + fmt.Fprintln(io.Out) + + fmt.Fprintln(io.Out, colorize.Bold("Choose your DNS setup:")) + fmt.Fprintln(io.Out) + + optionNum := 1 + + if promoteExtProxy { + if hasIPv4 { + fmt.Fprintf(io.Out, colorize.Green("%d. External proxy setup\n\n"), optionNum) + fmt.Fprintf(io.Out, " AAAA %s → %s\n\n", getRecordName(opts.Hostname), opts.IPv6Address.Address) + fmt.Fprintln(io.Out, " When proxying traffic, you should only use your application's IPv6 address.") + fmt.Fprintln(io.Out) + optionNum++ + } else { + fmt.Fprintf(io.Out, colorize.Yellow("%d. 
External proxy setup (requires IPv6 allocation)\n"), optionNum) + fmt.Fprintf(io.Out, " Run: %s to allocate IPv6 address\n", colorize.Bold("fly ips allocate-v6")) + fmt.Fprintf(io.Out, " Then: %s to view these instructions again\n\n", colorize.Bold("fly certs setup "+opts.Hostname)) + fmt.Fprintln(io.Out, " When proxying traffic, you should only use your application's IPv6 address.") + fmt.Fprintln(io.Out) + optionNum++ } } - return false -} -func printCloudflareInstructions(ctx context.Context, hostname string, cert *fly.AppCertificate) error { - io := iostreams.FromContext(ctx) - colorize := io.ColorScheme() + fmt.Fprintf(io.Out, colorize.Green("%d. A and AAAA records (recommended for direct connections)\n\n"), optionNum) + if hasIPv4 { + fmt.Fprintf(io.Out, " A %s → %s\n", getRecordName(opts.Hostname), opts.IPv4Address.Address) + } else { + fmt.Fprintf(io.Out, " %s\n", colorize.Yellow("No IPv4 addresses are allocated for your application.")) + fmt.Fprintf(io.Out, " Run: %s to allocate recommended addresses\n", colorize.Bold("fly ips allocate")) + fmt.Fprintf(io.Out, " Then: %s to view these instructions again\n", colorize.Bold("fly certs setup "+opts.Hostname)) + } + if hasIPv6 { + fmt.Fprintf(io.Out, " AAAA %s → %s\n", getRecordName(opts.Hostname), opts.IPv6Address.Address) + } else { + fmt.Fprintf(io.Out, "\n %s\n", colorize.Yellow("No IPv6 addresses are allocated for your application.")) + fmt.Fprintf(io.Out, " Run: %s to allocate a dedicated IPv6 address\n", colorize.Bold("fly ips allocate-v6")) + fmt.Fprintf(io.Out, " Then: %s to view these instructions again\n", colorize.Bold("fly certs setup "+opts.Hostname)) + } + fmt.Fprintln(io.Out) + optionNum++ - fmt.Fprintln(io.Out, colorize.Yellow("You're using Cloudflare's proxying feature (orange cloud active) for this hostname.")) - fmt.Fprintln(io.Out, "If you do not need Cloudflare-specific features, it's best to turn off proxying.") - fmt.Fprintln(io.Out, "The only way to create certificates for proxied hostnames is to use the DNS challenge.") + if !opts.Certificate.IsApex && (hasIPv4 || hasIPv6) && opts.CNAMETarget != "" { + fmt.Fprintf(io.Out, colorize.Cyan("%d. CNAME record\n\n"), optionNum) + fmt.Fprintf(io.Out, " CNAME %s → %s\n", getRecordName(opts.Hostname), opts.CNAMETarget) + fmt.Fprintln(io.Out) + optionNum++ + } - printDNSValidationInstructions(ctx, 1, hostname, cert) + if !promoteExtProxy && !opts.Certificate.IsWildcard { + fmt.Fprintf(io.Out, colorize.Blue("%d. External proxy setup\n\n"), optionNum) + if hasIPv6 { + fmt.Fprintf(io.Out, " AAAA %s → %s\n\n", getRecordName(opts.Hostname), opts.IPv6Address.Address) + } else { + fmt.Fprintf(io.Out, " %s\n", colorize.Yellow("No IPv6 addresses are allocated for your application.")) + fmt.Fprintf(io.Out, " Run: %s to allocate a dedicated IPv6 address\n", colorize.Bold("fly ips allocate-v6")) + fmt.Fprintf(io.Out, " Then: %s to view these instructions again\n\n", colorize.Bold("fly certs setup "+opts.Hostname)) + } + fmt.Fprintln(io.Out, " Use this setup when configuring a proxy or CDN in front of your Fly application.") + fmt.Fprintln(io.Out, " When proxying traffic, you should only use your application's IPv6 address.") + fmt.Fprintln(io.Out) + // optionNum++ uncomment if steps added. 
+ } + if opts.Certificate.IsWildcard { + fmt.Fprint(io.Out, colorize.Yellow("Required: DNS Challenge\n\n")) + } else { + fmt.Fprint(io.Out, colorize.Yellow("Optional: DNS Challenge\n\n")) + } + fmt.Fprintf(io.Out, " %s → %s\n\n", opts.Certificate.DNSValidationHostname, opts.Certificate.DNSValidationTarget) + fmt.Fprintln(io.Out, " Additional to one of the DNS setups.") + if opts.Certificate.IsWildcard { + fmt.Fprintf(io.Out, " %s\n", colorize.Yellow("Required for this wildcard certificate.")) + } else { + fmt.Fprintln(io.Out, " Required for wildcard certificates, or to generate") + fmt.Fprintln(io.Out, " a certificate before directing traffic to your application.") + } fmt.Fprintln(io.Out) - fmt.Fprintln(io.Out, "If you've already set this up, your certificate should be issued soon.") - fmt.Fprintln(io.Out, "For much more information, check our docs at: https://fly.io/docs/networking/custom-domain/") return nil } +func getRecordName(hostname string) string { + eTLD, _ := publicsuffix.EffectiveTLDPlusOne(hostname) + subdomainname := strings.TrimSuffix(hostname, eTLD) + + if subdomainname == "" { + return "@" + } + return strings.TrimSuffix(subdomainname, ".") +} + func printCertificate(ctx context.Context, cert *fly.AppCertificate) { io := iostreams.FromContext(ctx) colorize := io.ColorScheme() @@ -482,6 +604,10 @@ func printCertificates(ctx context.Context, certs []fly.AppCertificateCompact) e return nil } +func needsAlternateHostname(hostname string) bool { + return strings.Split(hostname, ".")[0] == "www" || len(strings.Split(hostname, ".")) == 2 +} + func getAlternateHostname(hostname string) string { if strings.Split(hostname, ".")[0] == "www" { return strings.Replace(hostname, "www.", "", 1) @@ -524,3 +650,27 @@ var CloudflareIPs = []*net.IPNet{ mustParseCIDR("2a06:98c0::/29"), mustParseCIDR("2c0f:f248::/32"), } + +var FastlyIPs = []*net.IPNet{ + mustParseCIDR("23.235.32.0/20"), + mustParseCIDR("43.249.72.0/22"), + mustParseCIDR("103.244.50.0/24"), + mustParseCIDR("103.245.222.0/23"), + mustParseCIDR("103.245.224.0/24"), + mustParseCIDR("104.156.80.0/20"), + mustParseCIDR("140.248.64.0/18"), + mustParseCIDR("140.248.128.0/17"), + mustParseCIDR("146.75.0.0/17"), + mustParseCIDR("151.101.0.0/16"), + mustParseCIDR("157.52.64.0/18"), + mustParseCIDR("167.82.0.0/17"), + mustParseCIDR("167.82.128.0/20"), + mustParseCIDR("167.82.160.0/20"), + mustParseCIDR("167.82.224.0/20"), + mustParseCIDR("172.111.64.0/18"), + mustParseCIDR("185.31.16.0/22"), + mustParseCIDR("199.27.72.0/21"), + mustParseCIDR("199.232.0.0/16"), + mustParseCIDR("2a04:4e40::/32"), + mustParseCIDR("2a04:4e42::/32"), +} diff --git a/internal/command/command.go b/internal/command/command.go index 4251b46417..100cc1dad2 100644 --- a/internal/command/command.go +++ b/internal/command/command.go @@ -19,6 +19,8 @@ import ( "github.com/superfly/flyctl/internal/command/auth/webauth" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" "github.com/superfly/flyctl/iostreams" "github.com/superfly/flyctl/internal/appconfig" @@ -603,6 +605,24 @@ func RequireSession(ctx context.Context) (context.Context, error) { return ctx, nil } +// Apply uiex client to uiex +func RequireUiex(ctx context.Context) (context.Context, error) { + cfg := config.FromContext(ctx) + + if uiexutil.ClientFromContext(ctx) == nil { + client, err := uiexutil.NewClientWithOptions(ctx, uiex.NewClientOpts{ + Logger: logger.FromContext(ctx), 
+ Tokens: cfg.Tokens, + }) + if err != nil { + return nil, err + } + ctx = uiexutil.NewContextWithClient(ctx, client) + } + + return ctx, nil +} + func tryOpenUserURL(ctx context.Context, url string) error { io := iostreams.FromContext(ctx) diff --git a/internal/command/command_run.go b/internal/command/command_run.go index 71d4a7cbbc..346df6935f 100644 --- a/internal/command/command_run.go +++ b/internal/command/command_run.go @@ -9,6 +9,7 @@ import ( "os" "path" "path/filepath" + "slices" "strconv" "strings" @@ -17,7 +18,6 @@ import ( "github.com/samber/lo" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/iostreams" - "golang.org/x/exp/slices" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/build/imgsrc" @@ -26,6 +26,7 @@ import ( "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/launchdarkly" "github.com/superfly/flyctl/internal/state" ) @@ -36,8 +37,35 @@ func DetermineImage(ctx context.Context, appName string, imageOrPath string) (im cfg = appconfig.ConfigFromContext(ctx) ) - daemonType := imgsrc.NewDockerDaemonType(!flag.GetBool(ctx, "build-remote-only"), !flag.GetBool(ctx, "build-local-only"), env.IsCI(), flag.GetBool(ctx, "build-depot"), flag.GetBool(ctx, "build-nixpacks")) - resolver := imgsrc.NewResolver(daemonType, client, appName, io, flag.GetWireguard(ctx), false) + appCompact, err := client.GetAppCompact(ctx, appName) + if err != nil { + return nil, err + } + + // Start the feature flag client, if we haven't already + if launchdarkly.ClientFromContext(ctx) == nil { + ffClient, err := launchdarkly.NewClient(ctx, launchdarkly.UserInfo{ + OrganizationID: appCompact.Organization.InternalNumericID, + UserID: 0, + }) + if err != nil { + return nil, fmt.Errorf("could not create feature flag client: %w", err) + } + ctx = launchdarkly.NewContextWithClient(ctx, ffClient) + } + + org, err := client.GetOrganizationByApp(ctx, appName) + if err != nil { + return nil, err + } + + ldClient := launchdarkly.ClientFromContext(ctx) + useManagedBuilder := ldClient.ManagedBuilderEnabled() + daemonType := imgsrc.NewDockerDaemonType(!flag.GetBool(ctx, "build-remote-only"), !flag.GetBool(ctx, "build-local-only"), env.IsCI(), flag.GetBool(ctx, "build-depot"), flag.GetBool(ctx, "build-nixpacks"), useManagedBuilder) + resolver := imgsrc.NewResolver( + daemonType, client, appName, io, flag.GetWireguard(ctx), false, + imgsrc.WithProvisioner(imgsrc.NewProvisioner(org)), + ) // build if relative or absolute path if strings.HasPrefix(imageOrPath, ".") || strings.HasPrefix(imageOrPath, "/") { @@ -73,14 +101,7 @@ func DetermineImage(ctx context.Context, appName string, imageOrPath string) (im } opts.BuildArgs = extraArgs - if cfg != nil && cfg.Experimental != nil { - opts.UseZstd = cfg.Experimental.UseZstd - } - - // use-zstd passed through flags takes precedence over the one set in config - if flag.IsSpecified(ctx, "use-zstd") { - opts.UseZstd = flag.GetBool(ctx, "use-zstd") - } + opts.Compression, opts.CompressionLevel = cfg.DetermineCompression(ctx) img, err = resolver.BuildImage(ctx, io, opts) if err != nil { @@ -108,7 +129,7 @@ func DetermineImage(ctx context.Context, appName string, imageOrPath string) (im return nil, errors.New("could not find an image to deploy") } - fmt.Fprintf(io.Out, "Image: %s\n", img.Tag) + fmt.Fprintf(io.Out, "Image: %s\n", img.String()) fmt.Fprintf(io.Out, "Image size: %s\n\n", 
humanize.Bytes(uint64(img.Size))) return img, nil diff --git a/internal/command/config/env.go b/internal/command/config/env.go index c7c551a3d7..c028d6089f 100644 --- a/internal/command/config/env.go +++ b/internal/command/config/env.go @@ -6,12 +6,11 @@ import ( "github.com/samber/lo" "github.com/spf13/cobra" fly "github.com/superfly/fly-go" - "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" - "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/render" "github.com/superfly/flyctl/iostreams" ) @@ -32,29 +31,25 @@ secrets and another for config file defined environment variables.` } func runEnv(ctx context.Context) error { - apiClient := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - io := iostreams.FromContext(ctx) - - secrets, err := apiClient.GetAppSecrets(ctx, appName) + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - secretRows := lo.Map(secrets, func(s fly.Secret, _ int) []string { - return []string{s.Name, s.Digest, s.CreatedAt.Format("2006-01-02T15:04:05")} - }) - if err := render.Table(io.Out, "Secrets", secretRows, "Name", "Digest", "Created At"); err != nil { + io := iostreams.FromContext(ctx) + + secrets, err := appsecrets.List(ctx, flapsClient, appName) + if err != nil { return err } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppName: appName, + secretRows := lo.Map(secrets, func(s fly.AppSecret, _ int) []string { + return []string{s.Name, s.Digest} }) - if err != nil { + if err := render.Table(io.Out, "Secrets", secretRows, "Name", "Digest"); err != nil { return err } - ctx = flapsutil.NewContextWithClient(ctx, flapsClient) cfg, err := appconfig.FromRemoteApp(ctx, appName) if err != nil { diff --git a/internal/command/config/validate.go b/internal/command/config/validate.go index 1dcae5618a..547faac3ab 100644 --- a/internal/command/config/validate.go +++ b/internal/command/config/validate.go @@ -2,6 +2,7 @@ package config import ( "context" + "errors" "fmt" "github.com/spf13/cobra" @@ -22,18 +23,65 @@ ensure it is correct and meaningful to the platform.` command.RequireAppName, ) cmd.Args = cobra.NoArgs - flag.Add(cmd, flag.App(), flag.AppConfig()) + flag.Add(cmd, flag.App(), flag.AppConfig(), flag.Bool{ + Name: "strict", + Shorthand: "s", + Description: "Enable strict validation to check for unrecognized sections and keys", + Default: false, + }) return } func runValidate(ctx context.Context) error { io := iostreams.FromContext(ctx) cfg := appconfig.ConfigFromContext(ctx) + strictMode := flag.GetBool(ctx, "strict") - if err := cfg.SetMachinesPlatform(); err != nil { + // if not found locally, try to get it from the remote app + var err error + if cfg == nil { + appName := appconfig.NameFromContext(ctx) + if appName == "" { + return errors.New("app name is required") + } else { + cfg, err = appconfig.FromRemoteApp(ctx, appName) + if err != nil { + return err + } + } + } + + var rawConfig map[string]any + if strictMode { + // Load config with raw data for strict validation + rawConfig, err = appconfig.LoadConfigAsMap(cfg.ConfigFilePath()) + if err != nil { + return fmt.Errorf("failed to load config for strict validation: %w", err) + } + } + + // Run standard validation + if err = cfg.SetMachinesPlatform(); err != 
nil { return err } - err, extra_info := cfg.Validate(ctx) - fmt.Fprintln(io.Out, extra_info) + err, extraInfo := cfg.Validate(ctx) + fmt.Fprintln(io.Out, extraInfo) + + // Run strict validation if enabled + if strictMode { + strictResult := appconfig.StrictValidate(rawConfig) + + if strictResult != nil && (len(strictResult.UnrecognizedSections) > 0 || len(strictResult.UnrecognizedKeys) > 0) { + strictOutput := appconfig.FormatStrictValidationErrors(strictResult) + if strictOutput != "" { + fmt.Fprintf(io.Out, "\nStrict validation found unrecognised sections or keys:\n%s\n\n\n", strictOutput) + // Return error to indicate validation failed + if err == nil { + err = errors.New("strict validation failed") + } + } + } + } + return err } diff --git a/internal/command/console/console.go b/internal/command/console/console.go index d991ec21a9..b3cf7378f8 100644 --- a/internal/command/console/console.go +++ b/internal/command/console/console.go @@ -10,11 +10,13 @@ import ( "github.com/google/shlex" "github.com/samber/lo" "github.com/spf13/cobra" + "github.com/superfly/flyctl/agent" fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/cmdutil" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/ssh" @@ -53,9 +55,13 @@ func New() *cobra.Command { flag.Bool{ Name: "select", Shorthand: "s", - Description: "Select the machine on which to execute the console from a list.", + Description: "Select the machine and container on which to execute the console from a list.", Default: false, }, + flag.String{ + Name: "container", + Description: "Container to connect to", + }, flag.String{ Name: "user", Shorthand: "u", @@ -210,7 +216,7 @@ func runConsole(ctx context.Context) error { defer cleanup() } - _, dialer, err := ssh.BringUpAgent(ctx, apiClient, app, *network, false) + _, dialer, err := agent.BringUpAgent(ctx, apiClient, app, *network, false) if err != nil { return err } @@ -221,6 +227,7 @@ func runConsole(ctx context.Context) error { Dialer: dialer, Username: flag.GetString(ctx, "user"), DisableSpinner: false, + Container: flag.GetString(ctx, "container"), AppNames: []string{app.Name}, } sshClient, err := ssh.Connect(params, machine.PrivateIP) @@ -234,7 +241,7 @@ func runConsole(ctx context.Context) error { consoleCommand = flag.GetString(ctx, "command") } - return ssh.Console(ctx, sshClient, consoleCommand, true) + return ssh.Console(ctx, sshClient, consoleCommand, true, params.Container) } func selectMachine(ctx context.Context, app *fly.AppCompact, appConfig *appconfig.Config) (*fly.Machine, func(), error) { @@ -389,10 +396,16 @@ func makeEphemeralConsoleMachine(ctx context.Context, app *fly.AppCompact, appCo machConfig.Guest.HostDedicationID = hdid } + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return nil, nil, err + } + input := &machine.EphemeralInput{ LaunchInput: fly.LaunchMachineInput{ - Config: machConfig, - Region: config.FromContext(ctx).Region, + Config: machConfig, + Region: config.FromContext(ctx).Region, + MinSecretsVersion: minvers, }, What: "to run the console", } diff --git a/internal/command/consul/attach.go b/internal/command/consul/attach.go index 2e1a3d3f26..9f7af13876 100644 --- a/internal/command/consul/attach.go +++ b/internal/command/consul/attach.go @@ -8,6 +8,7 @@ import ( "github.com/superfly/flyctl/internal/command" 
"github.com/superfly/flyctl/internal/command/secrets" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" ) @@ -44,7 +45,7 @@ func runAttach(ctx context.Context) error { appName = appconfig.NameFromContext(ctx) secretName = flag.GetString(ctx, "variable-name") ) - appCompact, err := apiClient.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } @@ -55,6 +56,10 @@ func runAttach(ctx context.Context) error { secretsToSet := map[string]string{ secretName: consulPayload.ConsulURL, } - err = secrets.SetSecretsAndDeploy(ctx, appCompact, secretsToSet, false, false) + err = secrets.SetSecretsAndDeploy(ctx, flapsClient, app, secretsToSet, secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) return err } diff --git a/internal/command/consul/detach.go b/internal/command/consul/detach.go index 9c7989fd32..cabe7bd13a 100644 --- a/internal/command/consul/detach.go +++ b/internal/command/consul/detach.go @@ -8,7 +8,7 @@ import ( "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/secrets" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/flapsutil" ) func newDetach() *cobra.Command { @@ -36,15 +36,18 @@ func newDetach() *cobra.Command { func runDetach(ctx context.Context) error { var ( - apiClient = flyutil.ClientFromContext(ctx) appName = appconfig.NameFromContext(ctx) secretName = flag.GetString(ctx, "variable-name") ) - appCompact, err := apiClient.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } secretsToUnset := []string{secretName} - err = secrets.UnsetSecretsAndDeploy(ctx, appCompact, secretsToUnset, false, false) + err = secrets.UnsetSecretsAndDeploy(ctx, flapsClient, app, secretsToUnset, secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) return err } diff --git a/internal/command/deploy/deploy.go b/internal/command/deploy/deploy.go index b7fbdb6b1a..1f9a42cbc9 100644 --- a/internal/command/deploy/deploy.go +++ b/internal/command/deploy/deploy.go @@ -19,6 +19,7 @@ import ( "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/ctrlc" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/validation" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/launchdarkly" @@ -53,6 +54,9 @@ var CommonFlags = flag.Set{ flag.Depot(), flag.DepotScope(), flag.Nixpacks(), + flag.BuildkitAddr(), + flag.BuildkitImage(), + flag.Buildkit(), flag.BuildOnly(), flag.BpDockerHost(), flag.BpVolume(), @@ -124,6 +128,10 @@ var CommonFlags = flag.Set{ Name: "file-secret", Description: "Set of secrets in the form of /path/inside/machine=SECRET pairs where SECRET is the name of the secret. 
Can be specified multiple times.", }, + flag.String{ + Name: "primary-region", + Description: "Override primary region in fly.toml configuration.", + }, flag.StringSlice{ Name: "regions", Aliases: []string{"only-regions"}, @@ -174,6 +182,15 @@ var CommonFlags = flag.Set{ Description: "Number of times to retry a deployment if it fails", Default: "auto", }, + flag.String{ + Name: "builder-pool", + Default: "auto", + NoOptDefVal: "true", + Description: "Experimental: Use pooled builder from Fly.io", + Hidden: true, + }, + flag.Compression(), + flag.CompressionLevel(), } type Command struct { @@ -194,6 +211,7 @@ func New() *Command { command.RequireSession, command.ChangeWorkingDirectoryToFirstArgIfPresent, command.RequireAppName, + command.RequireUiex, ) cmd.Args = cobra.MaximumNArgs(1) @@ -243,7 +261,11 @@ func (cmd *Command) run(ctx context.Context) (err error) { return err } - defer tp.Shutdown(ctx) + defer func() { + shutdownCtx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + tp.Shutdown(shutdownCtx) + }() ctx, span := tracing.CMDSpan(ctx, "cmd.deploy") defer span.End() @@ -274,6 +296,14 @@ func (cmd *Command) run(ctx context.Context) (err error) { span.SetAttributes(attribute.String("user.id", user.ID)) + if err := validation.ValidateCompressionFlag(flag.GetString(ctx, "compression")); err != nil { + return err + } + + if err := validation.ValidateCompressionLevelFlag(flag.GetInt(ctx, "compression-level")); err != nil { + return err + } + var manifestPath = flag.GetString(ctx, "from-manifest") switch { @@ -518,12 +548,17 @@ func deployToMachines( status.AppName = app.Name status.OrgSlug = app.Organization.Slug status.Image = img.Tag - status.PrimaryRegion = cfg.PrimaryRegion status.Strategy = cfg.DeployStrategy() if flag.GetString(ctx, "strategy") != "" { status.Strategy = flag.GetString(ctx, "strategy") } + if flag.IsSpecified(ctx, "primary-region") { + status.PrimaryRegion = flag.GetString(ctx, "primary-region") + } else { + status.PrimaryRegion = cfg.PrimaryRegion + } + status.FlyctlVersion = buildinfo.Info().Version.String() retriesFlag := flag.GetString(ctx, "deploy-retries") @@ -536,7 +571,7 @@ func deployToMachines( deployRetries = int(retries) default: - var invalidRetriesErr error = fmt.Errorf("--deploy-retries must be set to a positive integer, 0, or 'auto'") + var invalidRetriesErr = fmt.Errorf("--deploy-retries must be set to a positive integer, 0, or 'auto'") retries, err := strconv.Atoi(retriesFlag) if err != nil { return invalidRetriesErr @@ -561,7 +596,7 @@ func deployToMachines( DeploymentImage: img.Tag, Strategy: flag.GetString(ctx, "strategy"), EnvFromFlags: flag.GetStringArray(ctx, "env"), - PrimaryRegionFlag: cfg.PrimaryRegion, + PrimaryRegionFlag: status.PrimaryRegion, SkipSmokeChecks: flag.GetDetach(ctx) || !flag.GetBool(ctx, "smoke-checks"), SkipHealthChecks: flag.GetDetach(ctx), SkipDNSChecks: flag.GetDetach(ctx) || !flag.GetBool(ctx, "dns-checks"), @@ -586,6 +621,7 @@ func deployToMachines( ProcessGroups: processGroups, DeployRetries: deployRetries, BuildID: img.BuildID, + BuilderID: img.BuilderID, } var path = flag.GetString(ctx, "export-manifest") diff --git a/internal/command/deploy/deploy_build.go b/internal/command/deploy/deploy_build.go index dd0c50167f..a8a8192249 100644 --- a/internal/command/deploy/deploy_build.go +++ b/internal/command/deploy/deploy_build.go @@ -42,7 +42,7 @@ func multipleDockerfile(ctx context.Context, appConfig *appconfig.Config) error } if found != config { - return fmt.Errorf("Ignoring 
%s, and using %s (from fly.toml).", found, config) + return fmt.Errorf("ignoring %s, and using %s (from %s)", found, config, appConfig.ConfigFilePath()) } return nil } @@ -57,6 +57,7 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec ldClient := launchdarkly.ClientFromContext(ctx) depotBool := ldClient.GetFeatureFlagValue("use-depot-for-builds", true).(bool) + useManagedBuilder := ldClient.ManagedBuilderEnabled() switch flag.GetString(ctx, "depot") { case "", "true": @@ -65,11 +66,30 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec depotBool = false case "auto": default: - return nil, fmt.Errorf("invalid falue for the 'depot' flag. must be 'true', 'false', or ''") + return nil, fmt.Errorf("invalid value for the 'depot' flag. must be 'true', 'false', or ''") + } + + switch flag.GetString(ctx, "builder-pool") { + case "", "true": + span.AddEvent("opt-in builder-pool") + useManagedBuilder = true + case "false": + useManagedBuilder = false + case "auto": + // nothing + default: + return nil, fmt.Errorf("invalid value for the 'builder-pool' flag. must be 'true', 'false', or ''") } tb := render.NewTextBlock(ctx, "Building image") - daemonType := imgsrc.NewDockerDaemonType(!flag.GetRemoteOnly(ctx), !flag.GetLocalOnly(ctx), env.IsCI(), depotBool, flag.GetBool(ctx, "nixpacks")) + daemonType := imgsrc.NewDockerDaemonType( + !flag.GetRemoteOnly(ctx), + !flag.GetLocalOnly(ctx), + env.IsCI(), + depotBool, + flag.GetBool(ctx, "nixpacks"), + useManagedBuilder, + ) client := flyutil.ClientFromContext(ctx) io := iostreams.FromContext(ctx) @@ -78,10 +98,30 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec if err := multipleDockerfile(ctx, appConfig); err != nil { span.AddEvent("found multiple dockerfiles") - terminal.Warnf("%s\n", err.Error()) + terminal.Warnf("%s", err.Error()) } - resolver := imgsrc.NewResolver(daemonType, client, appConfig.AppName, io, useWG, recreateBuilder) + org, err := client.GetOrganizationByApp(ctx, appConfig.AppName) + if err != nil { + return nil, err + } + + var provisioner *imgsrc.Provisioner + buildkitAddr := flag.GetBuildkitAddr(ctx) + buildkitImage := flag.GetBuildkitImage(ctx) + if flag.GetBool(ctx, "buildkit") && buildkitImage == "" && buildkitAddr == "" { + buildkitImage = imgsrc.DefaultBuildkitImage + } + if buildkitAddr != "" || buildkitImage != "" { + provisioner = imgsrc.NewBuildkitProvisioner(org, buildkitAddr, buildkitImage) + } else { + provisioner = imgsrc.NewProvisioner(org) + } + resolver := imgsrc.NewResolver( + daemonType, client, appConfig.AppName, io, + useWG, recreateBuilder, + imgsrc.WithProvisioner(provisioner), + ) var imageRef string if imageRef, err = fetchImageRef(ctx, appConfig); err != nil { @@ -94,7 +134,7 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec opts := imgsrc.RefOptions{ AppName: appConfig.AppName, WorkingDir: state.WorkingDirectory(ctx), - Publish: !flag.GetBuildOnly(ctx), + Publish: flag.GetBool(ctx, "push") || !flag.GetBuildOnly(ctx), ImageRef: imageRef, ImageLabel: flag.GetString(ctx, "image-label"), } @@ -134,10 +174,11 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec if appConfig.Experimental != nil { opts.UseOverlaybd = appConfig.Experimental.LazyLoadImages - - opts.UseZstd = appConfig.Experimental.UseZstd } + // Determine compression based on CLI flags, then app config, then LaunchDarkly, then default to gzip + opts.Compression, opts.CompressionLevel = 
appConfig.DetermineCompression(ctx) + // flyctl supports key=value form while Docker supports id=key,src=/path/to/secret form. // https://docs.docker.com/engine/reference/commandline/buildx_build/#secret cliBuildSecrets, err := cmdutil.ParseKVStringsToMap(flag.GetStringArray(ctx, "build-secret")) diff --git a/internal/command/deploy/deploy_build_test.go b/internal/command/deploy/deploy_build_test.go index 3275f79fba..5a4aac4f51 100644 --- a/internal/command/deploy/deploy_build_test.go +++ b/internal/command/deploy/deploy_build_test.go @@ -2,33 +2,38 @@ package deploy import ( "context" + "os" + "path/filepath" + "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/state" - "os" - "path/filepath" - "testing" ) func TestMultipleDockerfile(t *testing.T) { dir := t.TempDir() - f, err := os.Create(filepath.Join(dir, "Dockerfile")) + dockerfile, err := os.Create(filepath.Join(dir, "Dockerfile")) + require.NoError(t, err) + defer dockerfile.Close() // skipcq: GO-S2307 + + flyToml, err := os.Create(filepath.Join(dir, "fly.production.toml")) require.NoError(t, err) - defer f.Close() // skipcq: GO-S2307 + defer flyToml.Close() // skipcq: GO-S2307 + + cfg, err := appconfig.LoadConfig(flyToml.Name()) + require.NoError(t, err) + cfg.Build = &appconfig.Build{ + Dockerfile: "Dockerfile.from-fly-toml", + } ctx := state.WithWorkingDirectory(context.Background(), dir) err = multipleDockerfile(ctx, &appconfig.Config{}) + assert.NoError(t, err) - err = multipleDockerfile( - ctx, - &appconfig.Config{ - Build: &appconfig.Build{ - Dockerfile: "Dockerfile.from-fly-toml", - }, - }, - ) - assert.Error(t, err) + err = multipleDockerfile(ctx, cfg) + assert.ErrorContains(t, err, "fly.production.toml") } diff --git a/internal/command/deploy/deploy_first.go b/internal/command/deploy/deploy_first.go index 816169c1e9..86bfcf9325 100644 --- a/internal/command/deploy/deploy_first.go +++ b/internal/command/deploy/deploy_first.go @@ -154,6 +154,7 @@ func (md *machineDeployment) provisionVolumesOnFirstDeploy(ctx context.Context) ComputeRequirements: guest, ComputeImage: md.img, SnapshotRetention: m.SnapshotRetention, + AutoBackupEnabled: m.ScheduledSnapshots, } vol, err := md.flapsClient.CreateVolume(ctx, input) diff --git a/internal/command/deploy/machinebasedtest.go b/internal/command/deploy/machinebasedtest.go index 533c176fd0..66000b8ff8 100644 --- a/internal/command/deploy/machinebasedtest.go +++ b/internal/command/deploy/machinebasedtest.go @@ -2,13 +2,15 @@ package deploy import ( "context" + "errors" "fmt" "time" - "github.com/cenkalti/backoff" + "github.com/cenkalti/backoff/v5" "github.com/samber/lo" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/machine" "github.com/superfly/flyctl/internal/statuslogger" "github.com/superfly/flyctl/internal/tracing" @@ -147,25 +149,25 @@ func (md *machineDeployment) runTestMachines(ctx context.Context, machineToTest return nil } -const ErrNoLogsFound = "no logs found" +var errNoLogsFound = errors.New("no logs found") func (md *machineDeployment) waitForLogs(ctx context.Context, mach *fly.Machine, timeout time.Duration) error { b := backoff.NewExponentialBackOff() b.InitialInterval = 1 * time.Second b.MaxInterval = 10 * time.Second - b.MaxElapsedTime = timeout - return backoff.Retry(func() error { + _, err := backoff.Retry(ctx, func() 
([]fly.LogEntry, error) { logs, _, err := md.apiClient.GetAppLogs(ctx, md.app.Name, "", md.appConfig.PrimaryRegion, mach.ID) if err != nil { - return err + return nil, err } if len(logs) == 0 { - return fmt.Errorf(ErrNoLogsFound) + return nil, errNoLogsFound } - return nil - }, backoff.WithContext(b, ctx)) + return logs, nil + }, backoff.WithBackOff(b), backoff.WithMaxElapsedTime(timeout)) + return err } func (md *machineDeployment) createTestMachine(ctx context.Context, svc *appconfig.ServiceMachineCheck, machineToTest *fly.Machine, sl statuslogger.StatusLine) (*fly.Machine, error) { @@ -206,9 +208,14 @@ func (md *machineDeployment) launchInputForTestMachine(svc *appconfig.ServiceMac mConfig.Guest.HostDedicationID = hdid } + minvers, err := appsecrets.GetMinvers(md.appConfig.AppName) + if err != nil { + return nil, err + } return &fly.LaunchMachineInput{ - Config: mConfig, - Region: origMachineRaw.Region, + Config: mConfig, + Region: origMachineRaw.Region, + MinSecretsVersion: minvers, }, nil } diff --git a/internal/command/deploy/machines.go b/internal/command/deploy/machines.go index 09bccc5bdd..02d1374192 100644 --- a/internal/command/deploy/machines.go +++ b/internal/command/deploy/machines.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "maps" "slices" "strings" "sync" @@ -27,7 +28,6 @@ import ( "github.com/superfly/flyctl/terminal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/maps" ) const ( @@ -76,6 +76,7 @@ type MachineDeploymentArgs struct { RestartMaxRetries int DeployRetries int BuildID string + BuilderID string } func argsFromManifest(manifest *DeployManifest, app *fly.AppCompact) MachineDeploymentArgs { @@ -155,6 +156,7 @@ type machineDeployment struct { tigrisStatics *statics.DeployerState deployRetries int buildID string + builderID string } func NewMachineDeployment(ctx context.Context, args MachineDeploymentArgs) (_ MachineDeployment, err error) { @@ -282,6 +284,7 @@ func NewMachineDeployment(ctx context.Context, args MachineDeploymentArgs) (_ Ma processGroups: args.ProcessGroups, deployRetries: args.DeployRetries, buildID: args.BuildID, + builderID: args.BuilderID, } if err := md.setStrategy(); err != nil { tracing.RecordError(span, err, "failed to set strategy") @@ -409,7 +412,7 @@ func (md *machineDeployment) setMachinesForDeployment(ctx context.Context) error s = "s" } - filtersAppliedStr := strings.Join(maps.Keys(filtersApplied), "/") + filtersAppliedStr := strings.Join(slices.Collect(maps.Keys(filtersApplied)), "/") fmt.Fprintf(md.io.ErrOut, "%s filter%s applied, deploying to %d/%d machines\n", filtersAppliedStr, s, len(machines), nMachines) } @@ -420,6 +423,9 @@ func (md *machineDeployment) setMachinesForDeployment(ctx context.Context) error if m.Config.Metadata[fly.MachineConfigMetadataKeyFlyProcessGroup] == "" { m.Config.Metadata[fly.MachineConfigMetadataKeyFlyProcessGroup] = md.appConfig.DefaultProcessName() } + if md.builderID != "" { + m.Config.Metadata["fly_builder_id"] = md.builderID + } } } diff --git a/internal/command/deploy/machines_deploymachinesapp.go b/internal/command/deploy/machines_deploymachinesapp.go index 0dc1da2331..57378b25de 100644 --- a/internal/command/deploy/machines_deploymachinesapp.go +++ b/internal/command/deploy/machines_deploymachinesapp.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "maps" "math" "net" "slices" @@ -12,7 +13,7 @@ import ( "strings" "time" - "github.com/cenkalti/backoff" + "github.com/cenkalti/backoff/v5" "github.com/miekg/dns" "github.com/samber/lo" 
"github.com/sourcegraph/conc/pool" @@ -32,7 +33,6 @@ import ( "github.com/superfly/flyctl/terminal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" ) @@ -222,9 +222,18 @@ func (md *machineDeployment) restartMachinesApp(ctx context.Context) error { ctx, span := tracing.GetTracer().Start(ctx, "restart_machines") defer span.End() + var jerr error machineUpdateEntries := lo.Map(md.machineSet.GetMachines(), func(lm machine.LeasableMachine, _ int) *machineUpdateEntry { - return &machineUpdateEntry{leasableMachine: lm, launchInput: md.launchInputForRestart(lm.Machine())} + launchInput, err := md.launchInputForRestart(lm.Machine()) + if err != nil { + jerr = errors.Join(jerr, err) + return nil + } + return &machineUpdateEntry{leasableMachine: lm, launchInput: launchInput} }) + if jerr != nil { + return jerr + } return md.updateExistingMachines(ctx, machineUpdateEntries) } @@ -317,7 +326,7 @@ func (md *machineDeployment) deployCanaryMachines(ctx context.Context) (err erro func (md *machineDeployment) deployCreateMachinesForGroups(ctx context.Context, processGroupMachineDiff ProcessGroupsDiff) (err error) { groupsWithAutostopEnabled := make(map[string]bool) groupsWithAutosuspendEnabled := make(map[string]bool) - groups := maps.Keys(processGroupMachineDiff.groupsNeedingMachines) + groups := slices.Collect(maps.Keys(processGroupMachineDiff.groupsNeedingMachines)) total := len(groups) slices.Sort(groups) @@ -425,7 +434,7 @@ func (md *machineDeployment) deployMachinesApp(ctx context.Context) error { defer span.End() if !md.skipReleaseCommand { - if err := md.runReleaseCommand(ctx); err != nil { + if err := md.runReleaseCommands(ctx); err != nil { return fmt.Errorf("release command failed - aborting deployment. %w", err) } } @@ -960,6 +969,14 @@ func (md *machineDeployment) updateEntriesGroup(parentCtx context.Context, group return updatePool.Wait() } +// releaseLease releases the lease and log the error if any. 
+func releaseLease(ctx context.Context, m machine.LeasableMachine) { + err := m.ReleaseLease(ctx) + if err != nil { + terminal.Warnf("failed to release lease for machine %s: %s", m.FormattedMachineId(), err) + } +} + func (md *machineDeployment) updateMachineByReplace(ctx context.Context, e *machineUpdateEntry) error { ctx, span := tracing.GetTracer().Start(ctx, "update_by_replace", trace.WithAttributes(attribute.String("id", e.launchInput.ID))) defer span.End() @@ -981,7 +998,7 @@ func (md *machineDeployment) updateMachineByReplace(ctx context.Context, e *mach } lm = machine.NewLeasableMachine(md.flapsClient, md.io, newMachineRaw, false) - defer lm.ReleaseLease(ctx) + defer releaseLease(ctx, lm) e.leasableMachine = lm return nil } @@ -1059,7 +1076,7 @@ func (md *machineDeployment) spawnMachineInGroup(ctx context.Context, groupName lm := machine.NewLeasableMachine(md.flapsClient, md.io, newMachineRaw, false) statuslogger.Logf(ctx, "Machine %s was created", md.colorize.Bold(lm.FormattedMachineId())) - defer lm.ReleaseLease(ctx) + defer releaseLease(ctx, lm) // Don't wait for SkipLaunch machines, they are created but not started if launchInput.SkipLaunch { @@ -1359,16 +1376,15 @@ func (md *machineDeployment) checkDNS(ctx context.Context) error { b := backoff.NewExponentialBackOff() b.InitialInterval = 1 * time.Second b.MaxInterval = 5 * time.Second - b.MaxElapsedTime = 60 * time.Second - return backoff.Retry(func() error { + _, err = backoff.Retry(ctx, func() (any, error) { m := new(dns.Msg) var numIPv4, numIPv6 int for _, ipAddr := range ipAddrs { - if ipAddr.Type == "v4" || ipAddr.Type == "shared_v4" { + if (ipAddr.Type == "v4" && ipAddr.Region == "global") || ipAddr.Type == "shared_v4" { numIPv4 += 1 - } else if ipAddr.Type == "v6" { + } else if ipAddr.Type == "v6" && ipAddr.Region == "global" { numIPv6 += 1 } } @@ -1381,11 +1397,11 @@ func (md *machineDeployment) checkDNS(ctx context.Context) error { answerv4, _, err := c.Exchange(m, "8.8.8.8:53") if err != nil { tracing.RecordError(span, err, "failed to exchange v4") - return err + return nil, err } else if len(answerv4.Answer) != numIPv4 { span.SetAttributes(attribute.String("v4_answer", answerv4.String())) tracing.RecordError(span, errors.New("v4 response count mismatch"), "v4 response count mismatch") - return fmt.Errorf("expected %d A records for %s, got %d", numIPv4, fqdn, len(answerv4.Answer)) + return nil, fmt.Errorf("expected %d A records for %s, got %d", numIPv4, fqdn, len(answerv4.Answer)) } m.SetQuestion(fqdn, dns.TypeAAAA) @@ -1393,16 +1409,16 @@ func (md *machineDeployment) checkDNS(ctx context.Context) error { answerv6, _, err := c.Exchange(m, "8.8.8.8:53") if err != nil { tracing.RecordError(span, err, "failed to exchange v4") - return err + return nil, err } else if len(answerv6.Answer) != numIPv6 { span.SetAttributes(attribute.String("v6_answer", answerv6.String())) tracing.RecordError(span, errors.New("v6 response count mismatch"), "v6 response count mismatch") - return fmt.Errorf("expected %d AAAA records for %s, got %d", numIPv6, fqdn, len(answerv6.Answer)) + return nil, fmt.Errorf("expected %d AAAA records for %s, got %d", numIPv6, fqdn, len(answerv6.Answer)) } - return nil - }, backoff.WithContext(b, ctx)) - + return nil, nil + }, backoff.WithBackOff(b), backoff.WithMaxElapsedTime(60*time.Second)) + return err } else { return nil } diff --git a/internal/command/deploy/machines_launchinput.go b/internal/command/deploy/machines_launchinput.go index 3983cd7c53..77db506a45 100644 --- 
a/internal/command/deploy/machines_launchinput.go +++ b/internal/command/deploy/machines_launchinput.go @@ -7,21 +7,29 @@ import ( "github.com/samber/lo" fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/buildinfo" + "github.com/superfly/flyctl/internal/containerconfig" "github.com/superfly/flyctl/internal/machine" "github.com/superfly/flyctl/terminal" ) -func (md *machineDeployment) launchInputForRestart(origMachineRaw *fly.Machine) *fly.LaunchMachineInput { +func (md *machineDeployment) launchInputForRestart(origMachineRaw *fly.Machine) (*fly.LaunchMachineInput, error) { + minvers, err := appsecrets.GetMinvers(md.appConfig.AppName) + if err != nil { + return nil, err + } + mConfig := machine.CloneConfig(origMachineRaw.Config) md.setMachineReleaseData(mConfig) return &fly.LaunchMachineInput{ - ID: origMachineRaw.ID, - Config: mConfig, - Region: origMachineRaw.Region, - SkipLaunch: skipLaunch(origMachineRaw, mConfig), - } + ID: origMachineRaw.ID, + Config: mConfig, + Region: origMachineRaw.Region, + SkipLaunch: skipLaunch(origMachineRaw, mConfig), + MinSecretsVersion: minvers, + }, nil } func (md *machineDeployment) launchInputForLaunch(processGroup string, guest *fly.MachineGuest, standbyFor []string) (*fly.LaunchMachineInput, error) { @@ -59,10 +67,21 @@ func (md *machineDeployment) launchInputForLaunch(processGroup string, guest *fl mConfig.Guest.HostDedicationID = hdid } + // Update container image + if err = md.updateContainerImage(mConfig); err != nil { + return nil, err + } + + minvers, err := appsecrets.GetMinvers(md.appConfig.AppName) + if err != nil { + return nil, err + } + return &fly.LaunchMachineInput{ - Region: region, - Config: mConfig, - SkipLaunch: skipLaunch(nil, mConfig), + Region: region, + Config: mConfig, + SkipLaunch: skipLaunch(nil, mConfig), + MinSecretsVersion: minvers, }, nil } @@ -86,6 +105,35 @@ func (md *machineDeployment) launchInputForUpdate(origMachineRaw *fly.Machine) ( // Get the final process group and prevent empty string processGroup = mConfig.ProcessGroup() + // Update container image + if err = md.updateContainerImage(mConfig); err != nil { + return nil, err + } + + // Ensure container files are re-processed if they reference local files + // This is necessary because local files may have been updated since initial parsing + if (md.appConfig.MachineConfig != "" || (md.appConfig.Build != nil && md.appConfig.Build.Compose != nil)) && hasContainerFiles(mConfig) { + // Re-parse the container config to get fresh file content + composePath := "" + if md.appConfig.Build != nil && md.appConfig.Build.Compose != nil { + // DetectComposeFile returns the explicit file if set, otherwise auto-detects + composePath = md.appConfig.DetectComposeFile() + } + tempConfig := &fly.MachineConfig{} + err := containerconfig.ParseContainerConfig(tempConfig, composePath, md.appConfig.MachineConfig, md.appConfig.ConfigFilePath(), md.appConfig.Container) + if err == nil && len(tempConfig.Containers) > 0 { + // Apply container files from the re-parsed config + for _, container := range mConfig.Containers { + for _, tempContainer := range tempConfig.Containers { + if container.Name == tempContainer.Name && len(tempContainer.Files) > 0 { + // Update container files with fresh content + container.Files = tempContainer.Files + } + } + } + } + } + // Mounts needs special treatment: // * Volumes attached to existings machines can't be swapped by other volumes // * The only allowed in-place operation is to update 
its destination mount path @@ -184,15 +232,32 @@ func (md *machineDeployment) launchInputForUpdate(origMachineRaw *fly.Machine) ( mConfig.Guest.HostDedicationID = hdid } + minvers, err := appsecrets.GetMinvers(md.appConfig.AppName) + if err != nil { + return nil, err + } + return &fly.LaunchMachineInput{ ID: mID, Region: origMachineRaw.Region, Config: mConfig, SkipLaunch: skipLaunch(origMachineRaw, mConfig), RequiresReplacement: machineShouldBeReplaced, + MinSecretsVersion: minvers, }, nil } +// hasContainerFiles returns true if any container has file configurations +// that might need refreshing from local sources +func hasContainerFiles(mConfig *fly.MachineConfig) bool { + for _, container := range mConfig.Containers { + if len(container.Files) > 0 { + return true + } + } + return false +} + func (md *machineDeployment) setMachineReleaseData(mConfig *fly.MachineConfig) { mConfig.Metadata = lo.Assign(mConfig.Metadata, map[string]string{ fly.MachineConfigMetadataKeyFlyReleaseId: md.releaseId, @@ -217,6 +282,10 @@ func (md *machineDeployment) setMachineReleaseData(mConfig *fly.MachineConfig) { } else { delete(mConfig.Metadata, fly.MachineConfigMetadataKeyFlyManagedPostgres) } + + if md.builderID != "" { + mConfig.Metadata["fly_builder_id"] = md.builderID + } } // Skip launching currently-stopped or suspended machines if: @@ -242,3 +311,16 @@ func skipLaunch(origMachineRaw *fly.Machine, mConfig *fly.MachineConfig) bool { } return false } + +// updateContainerImage sets container.Image = mConfig.Image in any container where image == "." +func (md *machineDeployment) updateContainerImage(mConfig *fly.MachineConfig) error { + if len(mConfig.Containers) != 0 { + for i := range mConfig.Containers { + if mConfig.Containers[i].Image == "." { + mConfig.Containers[i].Image = mConfig.Image + } + } + } + + return nil +} diff --git a/internal/command/deploy/machines_launchinput_test.go b/internal/command/deploy/machines_launchinput_test.go index 6fc0ac3b15..431e47c4be 100644 --- a/internal/command/deploy/machines_launchinput_test.go +++ b/internal/command/deploy/machines_launchinput_test.go @@ -67,6 +67,7 @@ func testLaunchInputForBasic(t *testing.T) { "fly_flyctl_version": buildinfo.Version().String(), }, }, + MinSecretsVersion: nil, } li, err := md.launchInputForLaunch("", nil, nil) require.NoError(t, err) @@ -93,7 +94,8 @@ func testLaunchInputForBasic(t *testing.T) { want.Config.Metadata["fly_release_id"] = "new_release_id" want.Config.Metadata["fly_release_version"] = "4" want.Config.Metadata["user-added-me"] = "keep it" - li = md.launchInputForRestart(origMachineRaw) + li, err = md.launchInputForRestart(origMachineRaw) + assert.NoError(t, err) assert.Equal(t, want, li) // Now updating the machines must include changes to appConfig @@ -448,7 +450,8 @@ func testLaunchInputForUpdateKeepUnmanagedFields(t *testing.T) { assert.Equal(t, &fly.DNSConfig{SkipRegistration: true}, li.Config.DNS) assert.Equal(t, []fly.MachineProcess{{CmdOverride: []string{"foo"}}}, li.Config.Processes) - li = md.launchInputForRestart(origMachineRaw) + li, err = md.launchInputForRestart(origMachineRaw) + assert.NoError(t, err) assert.Equal(t, "ab1234567890", li.ID) assert.Equal(t, "ord", li.Region) assert.Equal(t, "24/7", li.Config.Schedule) @@ -521,6 +524,7 @@ func testLaunchInputForLaunchFiles(t *testing.T) { }, }, }, + MinSecretsVersion: nil, } li, err := md.launchInputForLaunch("", nil, nil) require.NoError(t, err) diff --git a/internal/command/deploy/machines_releasecommand.go 
b/internal/command/deploy/machines_releasecommand.go index 4c8551689c..7f7b7a1a05 100644 --- a/internal/command/deploy/machines_releasecommand.go +++ b/internal/command/deploy/machines_releasecommand.go @@ -12,10 +12,12 @@ import ( "github.com/logrusorgru/aurora" "github.com/samber/lo" + "github.com/sourcegraph/conc/pool" fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/format" @@ -27,22 +29,38 @@ import ( "golang.org/x/sync/errgroup" ) -func (md *machineDeployment) runReleaseCommand(ctx context.Context) (err error) { - ctx, span := tracing.GetTracer().Start(ctx, "run_release_cmd") +func (md *machineDeployment) runReleaseCommands(ctx context.Context) error { + err := md.runReleaseCommand(ctx, "release") + + if err == nil { + seedCommand := appconfig.SeedCommandFromContext(ctx) + + if seedCommand != "" { + md.appConfig.Deploy.ReleaseCommand = seedCommand + err = md.runReleaseCommand(ctx, "seed") + } + } + + return err +} + +func (md *machineDeployment) runReleaseCommand(ctx context.Context, commandType string) (err error) { + ctx, span := tracing.GetTracer().Start(ctx, "run_"+commandType+"_cmd") defer func() { if err != nil { - tracing.RecordError(span, err, "failed to run release_cmd") + tracing.RecordError(span, err, "failed to run "+commandType+"_cmd") } span.End() }() if md.appConfig.Deploy == nil || md.appConfig.Deploy.ReleaseCommand == "" { - span.AddEvent("no release command") + span.AddEvent("no " + commandType + " command") return nil } - fmt.Fprintf(md.io.ErrOut, "Running %s release_command: %s\n", + fmt.Fprintf(md.io.ErrOut, "Running %s %s_command: %s\n", md.colorize.Bold(md.app.Name), + commandType, md.appConfig.Deploy.ReleaseCommand, ) ctx, loggerCleanup := statuslogger.SingleLine(ctx, true) @@ -64,8 +82,8 @@ func (md *machineDeployment) runReleaseCommand(ctx context.Context) (err error) eg.Go(func() error { err := md.createOrUpdateReleaseCmdMachine(groupCtx) if err != nil { - tracing.RecordError(span, err, "failed to create release cmd machine") - return fmt.Errorf("error running release_command machine: %w", err) + tracing.RecordError(span, err, "failed to create "+commandType+" cmd machine") + return fmt.Errorf("error running %s_command machine: %w", commandType, err) } return nil }) @@ -117,24 +135,24 @@ func (md *machineDeployment) runReleaseCommand(ctx context.Context) (err error) fmt.Fprintln(md.io.ErrOut, "Starting machine") if err = releaseCmdMachine.Start(ctx); err != nil { - fmt.Fprintf(md.io.ErrOut, "error starting release_command machine: %v\n", err) + fmt.Fprintf(md.io.ErrOut, "error starting %s_command machine: %v\n", commandType, err) return } // FIXME: consolidate this wait stuff with deploy waits? 
Especially once we improve the outpu err = md.waitForReleaseCommandToFinish(ctx, releaseCmdMachine) if err != nil { - tracing.RecordError(span, err, "failed to wait for release cmd machine") + tracing.RecordError(span, err, "failed to wait for "+commandType+" cmd machine") return err } lastExitEvent, err := releaseCmdMachine.WaitForEventTypeAfterType(ctx, "exit", "start", md.releaseCmdTimeout, true) if err != nil { - return fmt.Errorf("error finding the release_command machine %s exit event: %w", releaseCmdMachine.Machine().ID, err) + return fmt.Errorf("error finding the %s_command machine %s exit event: %w", commandType, releaseCmdMachine.Machine().ID, err) } exitCode, err := lastExitEvent.Request.GetExitCode() if err != nil { - return fmt.Errorf("error get release_command machine %s exit code: %w", releaseCmdMachine.Machine().ID, err) + return fmt.Errorf("error get %s_command machine %s exit code: %w", commandType, releaseCmdMachine.Machine().ID, err) } if flag.GetBool(ctx, "verbose") { @@ -142,7 +160,7 @@ func (md *machineDeployment) runReleaseCommand(ctx context.Context) (err error) } if exitCode != 0 { - statuslogger.LogStatus(ctx, statuslogger.StatusFailure, "release_command failed") + statuslogger.LogStatus(ctx, statuslogger.StatusFailure, commandType+"_command failed") // Preemptive cleanup of the logger so that the logs have a clean place to write to loggerCleanup(false) @@ -163,7 +181,8 @@ func (md *machineDeployment) runReleaseCommand(ctx context.Context) (err error) } statuslogger.LogfStatus(ctx, statuslogger.StatusSuccess, - "release_command %s completed successfully", + "%s_command %s completed successfully", + commandType, md.colorize.Bold(releaseCmdMachine.Machine().ID), ) return nil @@ -194,29 +213,31 @@ func dedicatedHostIdMismatch(m *fly.Machine, ac *appconfig.Config) bool { func (md *machineDeployment) createOrUpdateReleaseCmdMachine(ctx context.Context) error { span := trace.SpanFromContext(ctx) - if md.releaseCommandMachine.IsEmpty() { - return md.createReleaseCommandMachine(ctx) - } - - releaseCmdMachine := md.releaseCommandMachine.GetMachines()[0] - - if dedicatedHostIdMismatch(releaseCmdMachine.Machine(), md.appConfig) { - span.AddEvent("dedicated hostid mismatch") - if err := releaseCmdMachine.Destroy(ctx, true); err != nil { - return fmt.Errorf("error destroying release_command machine: %w", err) + // Existent release command machines must be destroyed if not already, are set to auto-destroy anyways + if !md.releaseCommandMachine.IsEmpty() { + mPool := pool.New().WithErrors().WithMaxGoroutines(4).WithContext(ctx) + for _, m := range md.releaseCommandMachine.GetMachines() { + mPool.Go(func(ctx context.Context) error { + return m.Destroy(ctx, true) + }) + } + if err := mPool.Wait(); err != nil { + tracing.RecordError(span, err, "failed to destroy old release_command machine") } - - return md.createReleaseCommandMachine(ctx) } - return md.updateReleaseCommandMachine(ctx) + return md.createReleaseCommandMachine(ctx) } func (md *machineDeployment) createReleaseCommandMachine(ctx context.Context) error { ctx, span := tracing.GetTracer().Start(ctx, "create_release_cmd_machine") defer span.End() - launchInput := md.launchInputForReleaseCommand(nil) + launchInput, err := md.launchInputForReleaseCommand(nil) + if err != nil { + return err + } + releaseCmdMachine, err := md.flapsClient.Launch(ctx, *launchInput) if err != nil { tracing.RecordError(span, err, "failed to get ip addresses") @@ -235,33 +256,7 @@ func (md *machineDeployment) createReleaseCommandMachine(ctx 
context.Context) er return nil } -func (md *machineDeployment) updateReleaseCommandMachine(ctx context.Context) error { - ctx, span := tracing.GetTracer().Start(ctx, "update_release_cmd_machine") - defer span.End() - - releaseCmdMachine := md.releaseCommandMachine.GetMachines()[0] - fmt.Fprintf(md.io.ErrOut, " Updating release_command machine %s\n", md.colorize.Bold(releaseCmdMachine.Machine().ID)) - - if err := releaseCmdMachine.WaitForState(ctx, fly.MachineStateStopped, md.waitTimeout, false); err != nil { - err = suggestChangeWaitTimeout(err, "wait-timeout") - return err - } - - if err := md.releaseCommandMachine.AcquireLeases(ctx, md.leaseTimeout); err != nil { - return err - } - defer md.releaseCommandMachine.ReleaseLeases(ctx) // skipcq: GO-S2307 - md.releaseCommandMachine.StartBackgroundLeaseRefresh(ctx, md.leaseTimeout, md.leaseDelayBetween) - - launchInput := md.launchInputForReleaseCommand(releaseCmdMachine.Machine()) - if err := releaseCmdMachine.Update(ctx, *launchInput); err != nil { - return fmt.Errorf("error updating release_command machine: %w", err) - } - - return nil -} - -func (md *machineDeployment) launchInputForReleaseCommand(origMachineRaw *fly.Machine) *fly.LaunchMachineInput { +func (md *machineDeployment) launchInputForReleaseCommand(origMachineRaw *fly.Machine) (*fly.LaunchMachineInput, error) { if origMachineRaw == nil { origMachineRaw = &fly.Machine{ Region: md.appConfig.PrimaryRegion, @@ -280,11 +275,16 @@ func (md *machineDeployment) launchInputForReleaseCommand(origMachineRaw *fly.Ma mConfig.Guest.HostDedicationID = hdid } - return &fly.LaunchMachineInput{ - Config: mConfig, - Region: origMachineRaw.Region, - SkipLaunch: true, + minvers, err := appsecrets.GetMinvers(md.appConfig.AppName) + if err != nil { + return nil, err } + return &fly.LaunchMachineInput{ + Config: mConfig, + Region: origMachineRaw.Region, + SkipLaunch: true, + MinSecretsVersion: minvers, + }, nil } func (md *machineDeployment) inferReleaseCommandGuest() *fly.MachineGuest { @@ -332,5 +332,6 @@ func (md *machineDeployment) waitForReleaseCommandToFinish(ctx context.Context, err = suggestChangeWaitTimeout(err, "release-command-timeout") return fmt.Errorf("error waiting for release_command machine %s to finish running: %w", releaseCmdMachine.Machine().ID, err) } + md.releaseCommandMachine.RemoveMachines(ctx, []machine.LeasableMachine{releaseCmdMachine}) return nil } diff --git a/internal/command/deploy/machines_test.go b/internal/command/deploy/machines_test.go index 6cb5721549..7951fe9ab1 100644 --- a/internal/command/deploy/machines_test.go +++ b/internal/command/deploy/machines_test.go @@ -54,6 +54,7 @@ func Test_resolveUpdatedMachineConfig_Basic(t *testing.T) { "fly_flyctl_version": buildinfo.Version().String(), }, }, + MinSecretsVersion: nil, }, li) } @@ -145,9 +146,11 @@ func Test_resolveUpdatedMachineConfig_ReleaseCommand(t *testing.T) { }, }, }, + MinSecretsVersion: nil, }, li) - got := md.launchInputForReleaseCommand(nil) + got, err := md.launchInputForReleaseCommand(nil) + assert.NoError(t, err) // New release command machine assert.Equal(t, &fly.LaunchMachineInput{ @@ -178,7 +181,8 @@ func Test_resolveUpdatedMachineConfig_ReleaseCommand(t *testing.T) { }, Guest: fly.MachinePresets["shared-cpu-2x"], }, - SkipLaunch: true, + SkipLaunch: true, + MinSecretsVersion: nil, }, got) // Update existing release command machine @@ -198,7 +202,8 @@ func Test_resolveUpdatedMachineConfig_ReleaseCommand(t *testing.T) { }, } - got = md.launchInputForReleaseCommand(origMachine) + got, err = 
md.launchInputForReleaseCommand(origMachine) + assert.NoError(t, err) assert.Equal(t, &fly.LaunchMachineInput{ Config: &fly.MachineConfig{ @@ -228,7 +233,8 @@ func Test_resolveUpdatedMachineConfig_ReleaseCommand(t *testing.T) { }, Guest: fly.MachinePresets["shared-cpu-2x"], }, - SkipLaunch: true, + SkipLaunch: true, + MinSecretsVersion: nil, }, got) } @@ -268,6 +274,7 @@ func Test_resolveUpdatedMachineConfig_Mounts(t *testing.T) { Name: "data", }}, }, + MinSecretsVersion: nil, }, li) origMachine := &fly.Machine{ @@ -302,6 +309,7 @@ func Test_resolveUpdatedMachineConfig_Mounts(t *testing.T) { Path: "/data", }}, }, + MinSecretsVersion: nil, }, li) } @@ -327,7 +335,8 @@ func Test_resolveUpdatedMachineConfig_restartOnly(t *testing.T) { }, } - got := md.launchInputForRestart(origMachine) + got, err := md.launchInputForRestart(origMachine) + assert.NoError(t, err) assert.Equal(t, &fly.LaunchMachineInput{ ID: "OrigID", @@ -341,6 +350,7 @@ func Test_resolveUpdatedMachineConfig_restartOnly(t *testing.T) { "fly_flyctl_version": buildinfo.Version().String(), }, }, + MinSecretsVersion: nil, }, got) } @@ -374,7 +384,9 @@ func Test_resolveUpdatedMachineConfig_restartOnlyProcessGroup(t *testing.T) { }, } - got := md.launchInputForRestart(origMachine) + got, err := md.launchInputForRestart(origMachine) + assert.NoError(t, err) + assert.Equal(t, &fly.LaunchMachineInput{ ID: "OrigID", Config: &fly.MachineConfig{ @@ -387,5 +399,6 @@ func Test_resolveUpdatedMachineConfig_restartOnlyProcessGroup(t *testing.T) { "fly_flyctl_version": buildinfo.Version().String(), }, }, + MinSecretsVersion: nil, }, got) } diff --git a/internal/command/deploy/mock_client_test.go b/internal/command/deploy/mock_client_test.go index 92430d13bd..b522b123cc 100644 --- a/internal/command/deploy/mock_client_test.go +++ b/internal/command/deploy/mock_client_test.go @@ -46,10 +46,6 @@ func (m *mockFlapsClient) CreateApp(ctx context.Context, name string, org string return fmt.Errorf("failed to create app %s", name) } -func (m *mockFlapsClient) CreateSecret(ctx context.Context, sLabel, sType string, in fly.CreateSecretRequest) (err error) { - return fmt.Errorf("failed to create secret %s", sLabel) -} - func (m *mockFlapsClient) CreateVolume(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) { return nil, fmt.Errorf("failed to create volume %s", req.Name) } @@ -62,8 +58,12 @@ func (m *mockFlapsClient) DeleteMetadata(ctx context.Context, machineID, key str return fmt.Errorf("failed to delete metadata %s", key) } -func (m *mockFlapsClient) DeleteSecret(ctx context.Context, label string) (err error) { - return fmt.Errorf("failed to delete secret %s", label) +func (m *mockFlapsClient) DeleteAppSecret(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) { + return nil, fmt.Errorf("failed to delete app secret %s", name) +} + +func (m *mockFlapsClient) DeleteSecretKey(ctx context.Context, name string) error { + return fmt.Errorf("failed to delete secret key %s", name) } func (m *mockFlapsClient) DeleteVolume(ctx context.Context, volumeId string) (*fly.Volume, error) { @@ -89,8 +89,9 @@ func (m *mockFlapsClient) FindLease(ctx context.Context, machineID string) (*fly return nil, fmt.Errorf("failed to find lease for %s", machineID) } -func (m *mockFlapsClient) GenerateSecret(ctx context.Context, sLabel, sType string) (err error) { - return fmt.Errorf("failed to generate secret %s", sLabel) +func (m *mockFlapsClient) GenerateSecretKey(ctx context.Context, name string, typ string) (*fly.SetSecretKeyResp, error) { + + 
return nil, fmt.Errorf("failed to generate app secret %s", name) } func (m *mockFlapsClient) Get(ctx context.Context, machineID string) (*fly.Machine, error) { @@ -158,8 +159,12 @@ func (m *mockFlapsClient) ListFlyAppsMachines(ctx context.Context) ([]*fly.Machi return nil, nil, fmt.Errorf("failed to list fly apps machines") } -func (m *mockFlapsClient) ListSecrets(ctx context.Context) (out []fly.ListSecret, err error) { - return nil, fmt.Errorf("failed to list secrets") +func (m *mockFlapsClient) ListAppSecrets(ctx context.Context, version *uint64, showSecrets bool) ([]fly.AppSecret, error) { + return nil, fmt.Errorf("failed to list app secrets") +} + +func (m *mockFlapsClient) ListSecretKeys(ctx context.Context, version *uint64) ([]fly.SecretKey, error) { + return nil, fmt.Errorf("failed to list secret keys") } func (m *mockFlapsClient) NewRequest(ctx context.Context, method, path string, in interface{}, headers map[string][]string) (*http.Request, error) { @@ -209,6 +214,14 @@ func (m *mockFlapsClient) SetMetadata(ctx context.Context, machineID, key, value return nil } +func (m *mockFlapsClient) SetAppSecret(ctx context.Context, name string, value string) (*fly.SetAppSecretResp, error) { + return nil, fmt.Errorf("failed to set app secret %s", name) +} + +func (m *mockFlapsClient) SetSecretKey(ctx context.Context, name string, typ string, value []byte) (*fly.SetSecretKeyResp, error) { + return nil, fmt.Errorf("failed to set secret key %s", name) +} + func (m *mockFlapsClient) Start(ctx context.Context, machineID string, nonce string) (out *fly.MachineStartResponse, err error) { return nil, fmt.Errorf("failed to start %s", machineID) } @@ -232,6 +245,10 @@ func (m *mockFlapsClient) Update(ctx context.Context, builder fly.LaunchMachineI return nil, fmt.Errorf("failed to update %s", builder.ID) } +func (m *mockFlapsClient) UpdateAppSecrets(ctx context.Context, values map[string]*string) (*fly.UpdateAppSecretsResp, error) { + return nil, fmt.Errorf("failed to update app secret %v", values) +} + func (m *mockFlapsClient) UpdateVolume(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) { return nil, fmt.Errorf("failed to update volume %s", volumeId) } diff --git a/internal/command/deploy/plan.go b/internal/command/deploy/plan.go index b22e2c904c..ce153eeaf4 100644 --- a/internal/command/deploy/plan.go +++ b/internal/command/deploy/plan.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "sort" "strings" "sync" "time" @@ -289,6 +290,11 @@ func (md *machineDeployment) updateMachinesWRecovery(ctx context.Context, oldApp } currentState, err := md.appState(ctx, oldAppState) + // sort machines by id so we always have the same order when retrying + // This is needed for rolling deploys so we can start from the same machine + sort.Slice(currentState.Machines, func(i, j int) bool { + return currentState.Machines[i].ID < currentState.Machines[j].ID + }) if err != nil { span.RecordError(updateErr) return fmt.Errorf("failed to get current app state: %w", err) @@ -328,7 +334,7 @@ func (md *machineDeployment) updateProcessGroup(ctx context.Context, machineTupl ctx, span := tracing.GetTracer().Start(ctx, "update_process_group") defer span.End() - group := errgroup.Group{} + group, gCtx := errgroup.WithContext(ctx) group.SetLimit(poolSize) for _, machPair := range machineTuples { @@ -337,17 +343,37 @@ func (md *machineDeployment) updateProcessGroup(ctx context.Context, machineTupl newMachine := machPair.newMachine group.Go(func() error { - checkResult, _ := 
healthChecksPassed.Load(machPair.oldMachine.ID) - machineCheckResult := checkResult.(*healthcheckResult) + // if both old and new machines are nil, we don't need to update anything + if oldMachine == nil && newMachine == nil { + span.AddEvent("Both old and new machines are nil") + return nil + } - var sl statuslogger.StatusLine + var machineID string if oldMachine != nil { - sl = machineLogger.getLoggerFromID(oldMachine.ID) - } else if newMachine != nil { - sl = machineLogger.getLoggerFromID(newMachine.ID) + machineID = oldMachine.ID + } else { + machineID = newMachine.ID + } + + sl := machineLogger.getLoggerFromID(machineID) + + if err := gCtx.Err(); err != nil { + sl.LogStatus(statuslogger.StatusFailure, "skipping machine update due to earlier failure") + return err } - err := md.updateMachineWChecks(ctx, oldMachine, newMachine, sl, md.io, machineCheckResult) + checkResult, ok := healthChecksPassed.Load(machineID) + // this shouldn't happen, we ensure that the machine is in the map but just in case + if !ok { + err := fmt.Errorf("no health checks stored for machine") + sl.LogStatus(statuslogger.StatusFailure, err.Error()) + span.RecordError(err) + return fmt.Errorf("failed to update machine %s: %w", machineID, err) + } + machineCheckResult := checkResult.(*healthcheckResult) + + err := md.updateMachineWChecks(gCtx, oldMachine, newMachine, sl, md.io, machineCheckResult) if err != nil { sl.LogStatus(statuslogger.StatusFailure, err.Error()) span.RecordError(err) @@ -456,6 +482,7 @@ func (md *machineDeployment) releaseLeases(ctx context.Context, machineTuples [] sl.LogStatus(statuslogger.StatusFailure, fmt.Sprintf("Failed to clear lease for %s: %v", machine.ID, err)) return err } + machine.LeaseNonce = "" sl.LogStatus(statuslogger.StatusSuccess, fmt.Sprintf("Cleared lease for %s", machine.ID)) return nil @@ -719,7 +746,7 @@ func waitForMachineState(ctx context.Context, lm mach.LeasableMachine, possibleS } func (md *machineDeployment) acquireMachineLease(ctx context.Context, machID string) (*fly.MachineLease, error) { - leaseTimeout := int(md.leaseTimeout) + leaseTimeout := int(md.leaseTimeout.Seconds()) lease, err := md.flapsClient.AcquireLease(ctx, machID, &leaseTimeout) if err != nil { // TODO: tell users how to manually clear the lease diff --git a/internal/command/deploy/plan_test.go b/internal/command/deploy/plan_test.go index 563955731e..2225360d86 100644 --- a/internal/command/deploy/plan_test.go +++ b/internal/command/deploy/plan_test.go @@ -227,6 +227,9 @@ func TestUpdateMachines(t *testing.T) { }, nil }, ReleaseLeaseFunc: func(ctx context.Context, machineID, nonce string) error { + if _, loaded := acquiredLeases.LoadAndDelete(machineID); !loaded { + t.Error("Release lease not found for machine:", machineID) + } return nil }, UpdateFunc: func(ctx context.Context, builder fly.LaunchMachineInput, nonce string) (out *fly.Machine, err error) { @@ -288,10 +291,11 @@ func TestUpdateMachines(t *testing.T) { app: &fly.AppCompact{ Name: "myapp", }, - appConfig: &appconfig.Config{AppName: "myapp"}, - waitTimeout: 10 * time.Second, - deployRetries: 5, - maxUnavailable: 3, + appConfig: &appconfig.Config{AppName: "myapp"}, + waitTimeout: 10 * time.Second, + deployRetries: 5, + maxUnavailable: 3, + skipSmokeChecks: true, } oldAppState := &AppState{ @@ -303,7 +307,7 @@ func TestUpdateMachines(t *testing.T) { settings := updateMachineSettings{ pushForward: true, skipHealthChecks: false, - skipSmokeChecks: false, + skipSmokeChecks: true, skipLeaseAcquisition: false, } diff --git 
a/internal/command/deploy/statics/addon.go b/internal/command/deploy/statics/addon.go index e9b0967cf8..03c8089165 100644 --- a/internal/command/deploy/statics/addon.go +++ b/internal/command/deploy/statics/addon.go @@ -110,7 +110,7 @@ func (deployer *DeployerState) ensureBucketCreated(ctx context.Context) (tokeniz if retErr != nil { client := flyutil.ClientFromContext(ctx).GenqClient() // Using context.Background() here in case the error is that the context is canceled. - _, err := gql.DeleteAddOn(context.Background(), client, extName) + _, err := gql.DeleteAddOn(context.Background(), client, extName, string(gql.AddOnTypeTigris)) if err != nil { fmt.Fprintf(iostreams.FromContext(ctx).ErrOut, "Failed to delete extension: %v\n", err) } @@ -128,6 +128,9 @@ func (deployer *DeployerState) ensureBucketCreated(ctx context.Context) (tokeniz // TODO(allison): I'd really like ProvisionExtension to return the extension's ID, but for now we can just refetch it extFull, err := gql.GetAddOn(ctx, client.GenqClient(), extName, string(gql.AddOnTypeTigris)) + if err != nil { + return "", err + } // Update the addon with the tokenized key and the name of the app _, err = gql.UpdateAddOn(ctx, client.GenqClient(), extFull.AddOn.Id, extFull.AddOn.AddOnPlan.Id, []string{}, extFull.AddOn.Options, map[string]interface{}{ diff --git a/internal/command/deploy/statics/move.go b/internal/command/deploy/statics/move.go index 36b0b8ad07..4d188c4c42 100644 --- a/internal/command/deploy/statics/move.go +++ b/internal/command/deploy/statics/move.go @@ -61,7 +61,7 @@ func MoveBucket( return err } - _, err = gql.DeleteAddOn(ctx, client.GenqClient(), prevBucket.Name) + _, err = gql.DeleteAddOn(ctx, client.GenqClient(), prevBucket.Name, string(gql.AddOnTypeTigris)) if err != nil { return err } diff --git a/internal/command/deploy/strategy_bluegreen.go b/internal/command/deploy/strategy_bluegreen.go index cd75137870..5aafa00b20 100644 --- a/internal/command/deploy/strategy_bluegreen.go +++ b/internal/command/deploy/strategy_bluegreen.go @@ -175,7 +175,7 @@ func (bg *blueGreen) CreateGreenMachines(ctx context.Context) error { } greenMachine := machine.NewLeasableMachine(bg.flaps, bg.io, newMachineRaw, true) - defer greenMachine.ReleaseLease(ctx) + defer releaseLease(ctx, greenMachine) lock.Lock() defer lock.Unlock() @@ -414,7 +414,7 @@ func (bg *blueGreen) WaitForGreenMachinesToBeHealthy(ctx context.Context) error return } - status := updateMachine.TopLevelChecks() + status := updateMachine.AllHealthChecks() bg.healthLock.Lock() machineIDToHealthStatus[m.FormattedMachineId()] = status bg.healthLock.Unlock() @@ -628,43 +628,6 @@ func (bg *blueGreen) DestroyBlueMachines(ctx context.Context) error { return nil } -func (bg *blueGreen) attachCustomTopLevelChecks() { - for _, entry := range bg.blueMachines { - for _, service := range entry.launchInput.Config.Services { - servicePort := service.InternalPort - serviceProtocol := service.Protocol - - for _, check := range service.Checks { - cc := fly.MachineCheck{ - Port: check.Port, - Type: check.Type, - Interval: check.Interval, - Timeout: check.Timeout, - GracePeriod: check.GracePeriod, - HTTPMethod: check.HTTPMethod, - HTTPPath: check.HTTPPath, - HTTPProtocol: check.HTTPProtocol, - HTTPSkipTLSVerify: check.HTTPSkipTLSVerify, - HTTPHeaders: check.HTTPHeaders, - } - - if cc.Port == nil { - cc.Port = &servicePort - } - - if cc.Type == nil { - cc.Type = &serviceProtocol - } - - if entry.launchInput.Config.Checks == nil { - entry.launchInput.Config.Checks = 
make(map[string]fly.MachineCheck) - } - entry.launchInput.Config.Checks[fmt.Sprintf("bg_deployments_%s", *check.Type)] = cc - } - } - } -} - func (bg *blueGreen) Deploy(ctx context.Context) error { ctx, span := tracing.GetTracer().Start(ctx, "bluegreen") defer span.End() @@ -696,19 +659,24 @@ func (bg *blueGreen) Deploy(ctx context.Context) error { return err } - bg.attachCustomTopLevelChecks() - - totalChecks := 0 + totalMachinesWithChecks := 0 for _, entry := range bg.blueMachines { - if len(entry.launchInput.Config.Checks) == 0 { + machineChecks := len(entry.launchInput.Config.Checks) + + // Also count service-level checks + for _, service := range entry.launchInput.Config.Services { + machineChecks += len(service.Checks) + } + + if machineChecks == 0 { fmt.Fprintf(bg.io.ErrOut, "\n[WARN] Machine %s doesn't have healthchecks setup. We won't check its health.", entry.leasableMachine.FormattedMachineId()) continue } - totalChecks++ + totalMachinesWithChecks++ } - if totalChecks == 0 && len(bg.blueMachines) != 0 { + if totalMachinesWithChecks == 0 && len(bg.blueMachines) != 0 { fmt.Fprintf(bg.io.ErrOut, "\n\nYou need to define at least 1 check in order to use blue-green deployments. Refer to https://fly.io/docs/reference/configuration/#services-tcp_checks\n") return ErrValidationError } diff --git a/internal/command/deploy/strategy_bluegreen_test.go b/internal/command/deploy/strategy_bluegreen_test.go index e374bfb348..20dede0c27 100644 --- a/internal/command/deploy/strategy_bluegreen_test.go +++ b/internal/command/deploy/strategy_bluegreen_test.go @@ -32,6 +32,7 @@ func newBlueGreenStrategy(client flapsutil.FlapsClient, numberOfExistingMachines "check1": {}, }, }, + MinSecretsVersion: nil, }, }) } diff --git a/internal/command/extensions/arcjet/create.go b/internal/command/extensions/arcjet/create.go index 6445f33d9d..a6d63fe819 100644 --- a/internal/command/extensions/arcjet/create.go +++ b/internal/command/extensions/arcjet/create.go @@ -67,7 +67,11 @@ func runCreate(ctx context.Context) (err error) { } if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return diff --git a/internal/command/extensions/arcjet/destroy.go b/internal/command/extensions/arcjet/destroy.go index befe8ac107..e7afeb268e 100644 --- a/internal/command/extensions/arcjet/destroy.go +++ b/internal/command/extensions/arcjet/destroy.go @@ -67,7 +67,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeArcjet)) if err != nil { return diff --git a/internal/command/extensions/core/core.go b/internal/command/extensions/core/core.go index fa267890c6..2f6d5db544 100644 --- a/internal/command/extensions/core/core.go +++ b/internal/command/extensions/core/core.go @@ -313,6 +313,12 @@ func WaitForProvision(ctx context.Context, name string, provider string) error { return err } + // Validate that the returned add-on matches the expected provider type + if resp.AddOn.AddOnProvider.Name != provider { + return fmt.Errorf("found add-on '%s' with provider '%s', but expected provider '%s'", + resp.AddOn.Name, resp.AddOn.AddOnProvider.Name, provider) + } + if resp.AddOn.Status == "error" { if resp.AddOn.ErrorMessage != "" { return 
errors.New(resp.AddOn.ErrorMessage) @@ -387,6 +393,12 @@ func OpenDashboard(ctx context.Context, extensionName string, provider gql.AddOn return err } + // Validate that the returned add-on matches the expected provider type + if result.AddOn.AddOnProvider.Name != string(provider) { + return fmt.Errorf("found add-on '%s' with provider '%s', but expected provider '%s'", + result.AddOn.Name, result.AddOn.AddOnProvider.Name, provider) + } + err = AgreeToProviderTos(ctx, result.AddOn.AddOnProvider.ExtensionProviderData) if err != nil { return err @@ -408,6 +420,12 @@ func Discover(ctx context.Context, provider gql.AddOnType) (addOn *gql.AddOnData return nil, nil, err } + // Validate that the returned add-on matches the expected provider type + if response.AddOn.AddOnProvider.Name != string(provider) { + return nil, nil, fmt.Errorf("found add-on '%s' with provider '%s', but expected provider '%s'", + response.AddOn.Name, response.AddOn.AddOnProvider.Name, provider) + } + addOn = &response.AddOn.AddOnData } else if appName != "" { @@ -431,9 +449,9 @@ func Discover(ctx context.Context, provider gql.AddOnType) (addOn *gql.AddOnData func setSecretsFromExtension(ctx context.Context, app *gql.AppData, extension *Extension, overrideSecretKeyNamesMap map[string]string) (err error) { var ( - io = iostreams.FromContext(ctx) - client = flyutil.ClientFromContext(ctx).GenqClient() - setSecrets bool = true + io = iostreams.FromContext(ctx) + client = flyutil.ClientFromContext(ctx).GenqClient() + setSecrets = true ) environment := extension.Data.Environment @@ -512,7 +530,14 @@ func AgreedToProviderTos(ctx context.Context, providerName string) (bool, error) if err != nil { return false, err } - return tosResp.Viewer.(*gql.AgreedToProviderTosViewerUser).AgreedToProviderTos, nil + + viewerUser, ok := tosResp.Viewer.(*gql.AgreedToProviderTosViewerUser) + if ok { + return viewerUser.AgreedToProviderTos, nil + } else { + // If we are unable to determine if the user has agreed to the provider ToS, return false + return false, nil + } } func Status(ctx context.Context, provider gql.AddOnType) (err error) { @@ -531,7 +556,7 @@ func Status(ctx context.Context, provider gql.AddOnType) (err error) { }, } - var cols []string = []string{"Name", "Primary Region", "Status"} + var cols = []string{"Name", "Primary Region", "Status"} if app != nil { obj[0] = append(obj[0], app.Name) @@ -663,4 +688,5 @@ var PlatformMap = map[string]string{ "Remix": "javascript-remix", "Remix/Prisma": "javascript-remix", "Ruby": "ruby", + "Shopify": "javascript-remix", } diff --git a/internal/command/extensions/enveloop/create.go b/internal/command/extensions/enveloop/create.go deleted file mode 100644 index ca212b88f1..0000000000 --- a/internal/command/extensions/enveloop/create.go +++ /dev/null @@ -1,65 +0,0 @@ -package enveloop - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/appconfig" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/command/orgs" - "github.com/superfly/flyctl/internal/command/secrets" - "github.com/superfly/flyctl/internal/flag" -) - -func create() (cmd *cobra.Command) { - const ( - short = "Provision an Enveloop project" - long = short + "\n" - ) - - cmd = command.New("create", short, long, runCreate, command.RequireSession, command.LoadAppNameIfPresent) - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - 
flag.Region(), - extensions_core.SharedFlags, - SharedFlags, - flag.String{ - Name: "name", - Shorthand: "n", - Description: "The name of your project", - }, - ) - return cmd -} - -func runCreate(ctx context.Context) (err error) { - appName := appconfig.NameFromContext(ctx) - params := extensions_core.ExtensionParams{} - - if appName != "" { - params.AppName = appName - } else { - org, err := orgs.OrgFromFlagOrSelect(ctx) - if err != nil { - return err - } - - params.Organization = org - } - - params.Provider = "enveloop" - extension, err := extensions_core.ProvisionExtension(ctx, params) - if err != nil { - return err - } - - if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) - } - - return err -} diff --git a/internal/command/extensions/enveloop/dashboard.go b/internal/command/extensions/enveloop/dashboard.go deleted file mode 100644 index 0f602e6c1b..0000000000 --- a/internal/command/extensions/enveloop/dashboard.go +++ /dev/null @@ -1,43 +0,0 @@ -package enveloop - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" -) - -func dashboard() (cmd *cobra.Command) { - const ( - long = `Open the Enveloop dashboard via your web browser` - - short = long - usage = "dashboard" - ) - - cmd = command.New(usage, short, long, runDashboard, command.RequireSession, command.LoadAppNameIfPresent) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - extensions_core.SharedFlags, - ) - cmd.Args = cobra.NoArgs - return cmd -} - -func runDashboard(ctx context.Context) (err error) { - if org := flag.GetOrg(ctx); org != "" { - return extensions_core.OpenOrgDashboard(ctx, org, "enveloop") - } - - extension, _, err := extensions_core.Discover(ctx, gql.AddOnTypeEnveloop) - if err != nil { - return err - } - return extensions_core.OpenDashboard(ctx, extension.Name, gql.AddOnTypeEnveloop) -} diff --git a/internal/command/extensions/enveloop/destroy.go b/internal/command/extensions/enveloop/destroy.go deleted file mode 100644 index 5242a65a2b..0000000000 --- a/internal/command/extensions/enveloop/destroy.go +++ /dev/null @@ -1,72 +0,0 @@ -package enveloop - -import ( - "context" - "fmt" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/internal/prompt" - "github.com/superfly/flyctl/iostreams" -) - -func destroy() (cmd *cobra.Command) { - const ( - long = `Permanently destroy an Enveloop project` - - short = long - usage = "destroy [name]" - ) - - cmd = command.New(usage, short, long, runDestroy, command.RequireSession, command.LoadAppNameIfPresent) - - cmd.Args = cobra.MaximumNArgs(1) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - extensions_core.SharedFlags, - ) - - return cmd -} - -func runDestroy(ctx context.Context) (err error) { - io := iostreams.FromContext(ctx) - colorize := io.ColorScheme() - - extension, _, err := extensions_core.Discover(ctx, gql.AddOnTypeEnveloop) - if err != nil { - return err - } - - if !flag.GetYes(ctx) { - const msg = "Destroying an Enveloop project is not reversible. All Enveloop templates, message settings, and message logs will be lost." 
- fmt.Fprintln(io.ErrOut, colorize.Red(msg)) - - switch confirmed, err := prompt.Confirmf(ctx, "Do you want to destroy the Enveloop project named %s?", extension.Name); { - case err == nil: - if !confirmed { - return nil - } - case prompt.IsNonInteractive(err): - return prompt.NonInteractiveError("yes flag must be specified when not running interactively") - default: - return err - } - } - - client := flyutil.ClientFromContext(ctx).GenqClient() - if _, err := gql.DeleteAddOn(ctx, client, extension.Name); err != nil { - return err - } - - out := iostreams.FromContext(ctx).Out - fmt.Fprintf(out, "Your Enveloop project %s was destroyed\n", extension.Name) - - return nil -} diff --git a/internal/command/extensions/enveloop/enveloop.go b/internal/command/extensions/enveloop/enveloop.go deleted file mode 100644 index c356f1ce91..0000000000 --- a/internal/command/extensions/enveloop/enveloop.go +++ /dev/null @@ -1,21 +0,0 @@ -package enveloop - -import ( - "github.com/spf13/cobra" - "github.com/superfly/flyctl/internal/command" - "github.com/superfly/flyctl/internal/flag" -) - -func New() (cmd *cobra.Command) { - const ( - short = "Provision and manage Enveloop projects" - long = short + "\n" - ) - - cmd = command.New("enveloop", short, long, nil) - cmd.AddCommand(create(), list(), dashboard(), destroy(), status()) - - return cmd -} - -var SharedFlags = flag.Set{} diff --git a/internal/command/extensions/enveloop/list.go b/internal/command/extensions/enveloop/list.go deleted file mode 100644 index e036a4d262..0000000000 --- a/internal/command/extensions/enveloop/list.go +++ /dev/null @@ -1,53 +0,0 @@ -package enveloop - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/internal/render" - "github.com/superfly/flyctl/iostreams" -) - -func list() (cmd *cobra.Command) { - const ( - long = `List your Enveloop projects` - short = long - usage = "list" - ) - - cmd = command.New(usage, short, long, runList, command.RequireSession) - cmd.Aliases = []string{"ls"} - - flag.Add(cmd, - flag.Org(), - extensions_core.SharedFlags, - ) - return cmd -} - -func runList(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx).GenqClient() - response, err := gql.ListAddOns(ctx, client, "enveloop") - if err != nil { - return err - } - - var rows [][]string - for _, extension := range response.AddOns.Nodes { - rows = append(rows, []string{ - extension.Name, - extension.Organization.Slug, - extension.PrimaryRegion, - }) - } - - out := iostreams.FromContext(ctx).Out - _ = render.Table(out, "", rows, "Name", "Org", "Region") - - return nil -} diff --git a/internal/command/extensions/enveloop/status.go b/internal/command/extensions/enveloop/status.go deleted file mode 100644 index dfa898369b..0000000000 --- a/internal/command/extensions/enveloop/status.go +++ /dev/null @@ -1,65 +0,0 @@ -package enveloop - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/render" - "github.com/superfly/flyctl/iostreams" -) - -func status() *cobra.Command { - const ( - short = "Show details about 
an Enveloop project" - long = short + "\n" - - usage = "status [name]" - ) - - cmd := command.New(usage, short, long, runStatus, - command.RequireSession, command.LoadAppNameIfPresent, - ) - - cmd.Args = cobra.MaximumNArgs(1) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - extensions_core.SharedFlags, - ) - - return cmd -} - -func runStatus(ctx context.Context) (err error) { - io := iostreams.FromContext(ctx) - - extension, app, err := extensions_core.Discover(ctx, gql.AddOnTypeEnveloop) - if err != nil { - return err - } - - obj := [][]string{ - { - extension.Name, - extension.Status, - extension.PrimaryRegion, - }, - } - - var cols []string = []string{"Name", "Status", "Region"} - - if app != nil { - obj[0] = append(obj[0], app.Name) - cols = append(cols, "App") - } - - if err = render.VerticalTable(io.Out, "Status", obj, cols...); err != nil { - return - } - return -} diff --git a/internal/command/extensions/extensions.go b/internal/command/extensions/extensions.go index 57402f9d2b..d1dfea04a0 100644 --- a/internal/command/extensions/extensions.go +++ b/internal/command/extensions/extensions.go @@ -6,9 +6,7 @@ import ( "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/extensions/arcjet" - "github.com/superfly/flyctl/internal/command/extensions/enveloop" "github.com/superfly/flyctl/internal/command/extensions/fly_mysql" - "github.com/superfly/flyctl/internal/command/extensions/kafka" "github.com/superfly/flyctl/internal/command/extensions/kubernetes" sentry_ext "github.com/superfly/flyctl/internal/command/extensions/sentry" "github.com/superfly/flyctl/internal/command/extensions/supabase" @@ -32,9 +30,7 @@ func New() (cmd *cobra.Command) { supabase.New(), tigris.New(), kubernetes.New(), - kafka.New(), vector.New(), - enveloop.New(), arcjet.New(), fly_mysql.New(), wafris.New(), diff --git a/internal/command/extensions/fly_mysql/create.go b/internal/command/extensions/fly_mysql/create.go index 72f4c61ead..7134ef0471 100644 --- a/internal/command/extensions/fly_mysql/create.go +++ b/internal/command/extensions/fly_mysql/create.go @@ -71,7 +71,11 @@ func runCreate(ctx context.Context) (err error) { } if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return diff --git a/internal/command/extensions/fly_mysql/destroy.go b/internal/command/extensions/fly_mysql/destroy.go index bab081a07e..a17a3702a3 100644 --- a/internal/command/extensions/fly_mysql/destroy.go +++ b/internal/command/extensions/fly_mysql/destroy.go @@ -67,7 +67,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeFlyMysql)) if err != nil { return diff --git a/internal/command/extensions/kafka/create.go b/internal/command/extensions/kafka/create.go deleted file mode 100644 index 6f96027ce6..0000000000 --- a/internal/command/extensions/kafka/create.go +++ /dev/null @@ -1,69 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/appconfig" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - 
"github.com/superfly/flyctl/internal/command/orgs" - "github.com/superfly/flyctl/internal/command/secrets" - "github.com/superfly/flyctl/internal/flag" -) - -func create() (cmd *cobra.Command) { - const ( - short = "Provision a Upstash Kafka cluster" - long = short + "\n" - ) - - cmd = command.New("create", short, long, runCreate, command.RequireSession, command.LoadAppNameIfPresent) - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - flag.Region(), - extensions_core.SharedFlags, - SharedFlags, - flag.String{ - Name: "name", - Shorthand: "n", - Description: "The name of your cluster", - }, - ) - return cmd -} - -func runCreate(ctx context.Context) (err error) { - appName := appconfig.NameFromContext(ctx) - params := extensions_core.ExtensionParams{} - - if appName != "" { - params.AppName = appName - } else { - org, err := orgs.OrgFromFlagOrSelect(ctx) - if err != nil { - return err - } - - params.Organization = org - } - - var options gql.AddOnOptions - - params.Options = options - params.PlanID = "qgaV5wZgnN553c2LQ4yOJR10" // PAYG is the only plan for now - params.Provider = "upstash_kafka" - extension, err := extensions_core.ProvisionExtension(ctx, params) - if err != nil { - return err - } - - if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) - } - - return err -} diff --git a/internal/command/extensions/kafka/dashboard.go b/internal/command/extensions/kafka/dashboard.go deleted file mode 100644 index 393ab07f31..0000000000 --- a/internal/command/extensions/kafka/dashboard.go +++ /dev/null @@ -1,43 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" -) - -func dashboard() (cmd *cobra.Command) { - const ( - long = `Visit the Upstash Kafka dashboard on the Upstash web console` - - short = long - usage = "dashboard" - ) - - cmd = command.New(usage, short, long, runDashboard, command.RequireSession, command.LoadAppNameIfPresent) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - extensions_core.SharedFlags, - ) - cmd.Args = cobra.NoArgs - return cmd -} - -func runDashboard(ctx context.Context) (err error) { - if org := flag.GetOrg(ctx); org != "" { - return extensions_core.OpenOrgDashboard(ctx, org, "upstash_kafka") - } - - extension, _, err := extensions_core.Discover(ctx, gql.AddOnTypeUpstashKafka) - if err != nil { - return err - } - return extensions_core.OpenDashboard(ctx, extension.Name, gql.AddOnTypeUpstashKafka) -} diff --git a/internal/command/extensions/kafka/destroy.go b/internal/command/extensions/kafka/destroy.go deleted file mode 100644 index 48e138c101..0000000000 --- a/internal/command/extensions/kafka/destroy.go +++ /dev/null @@ -1,72 +0,0 @@ -package kafka - -import ( - "context" - "fmt" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/internal/prompt" - "github.com/superfly/flyctl/iostreams" -) - -func destroy() (cmd *cobra.Command) { - const ( - long = `Permanently destroy an Upstash Kafka cluster` - - short = long - usage = "destroy [name]" - ) - - cmd = command.New(usage, short, long, 
runDestroy, command.RequireSession, command.LoadAppNameIfPresent) - - cmd.Args = cobra.MaximumNArgs(1) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - extensions_core.SharedFlags, - ) - - return cmd -} - -func runDestroy(ctx context.Context) (err error) { - io := iostreams.FromContext(ctx) - colorize := io.ColorScheme() - - extension, _, err := extensions_core.Discover(ctx, gql.AddOnTypeUpstashKafka) - if err != nil { - return err - } - - if !flag.GetYes(ctx) { - const msg = "Destroying an upstash Kafka cluster is not reversible." - fmt.Fprintln(io.ErrOut, colorize.Red(msg)) - - switch confirmed, err := prompt.Confirmf(ctx, "Do you want to destroy the cluster named %s?", extension.Name); { - case err == nil: - if !confirmed { - return nil - } - case prompt.IsNonInteractive(err): - return prompt.NonInteractiveError("yes flag must be specified when not running interactively") - default: - return err - } - } - - client := flyutil.ClientFromContext(ctx).GenqClient() - if _, err := gql.DeleteAddOn(ctx, client, extension.Name); err != nil { - return err - } - - out := iostreams.FromContext(ctx).Out - fmt.Fprintf(out, "Your Upstash Kafka cluster %s was destroyed\n", extension.Name) - - return nil -} diff --git a/internal/command/extensions/kafka/kafka.go b/internal/command/extensions/kafka/kafka.go deleted file mode 100644 index 96db1ba2ea..0000000000 --- a/internal/command/extensions/kafka/kafka.go +++ /dev/null @@ -1,21 +0,0 @@ -package kafka - -import ( - "github.com/spf13/cobra" - "github.com/superfly/flyctl/internal/command" - "github.com/superfly/flyctl/internal/flag" -) - -func New() (cmd *cobra.Command) { - const ( - short = "Provision and manage Upstash Kafka clusters" - long = short + "\n" - ) - - cmd = command.New("kafka", short, long, nil) - cmd.AddCommand(create(), update(), list(), dashboard(), destroy(), status()) - - return cmd -} - -var SharedFlags = flag.Set{} diff --git a/internal/command/extensions/kafka/list.go b/internal/command/extensions/kafka/list.go deleted file mode 100644 index f81dfc6c28..0000000000 --- a/internal/command/extensions/kafka/list.go +++ /dev/null @@ -1,53 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/internal/render" - "github.com/superfly/flyctl/iostreams" -) - -func list() (cmd *cobra.Command) { - const ( - long = `List your Upstash Kafka clusters` - short = long - usage = "list" - ) - - cmd = command.New(usage, short, long, runList, command.RequireSession) - cmd.Aliases = []string{"ls"} - - flag.Add(cmd, - flag.Org(), - extensions_core.SharedFlags, - ) - return cmd -} - -func runList(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx).GenqClient() - response, err := gql.ListAddOns(ctx, client, "upstash_kafka") - if err != nil { - return err - } - - var rows [][]string - for _, extension := range response.AddOns.Nodes { - rows = append(rows, []string{ - extension.Name, - extension.Organization.Slug, - extension.PrimaryRegion, - }) - } - - out := iostreams.FromContext(ctx).Out - _ = render.Table(out, "", rows, "Name", "Org", "Region") - - return nil -} diff --git a/internal/command/extensions/kafka/status.go b/internal/command/extensions/kafka/status.go deleted file mode 100644 index 6976983ef2..0000000000 --- 
a/internal/command/extensions/kafka/status.go +++ /dev/null @@ -1,65 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/render" - "github.com/superfly/flyctl/iostreams" -) - -func status() *cobra.Command { - const ( - short = "Show details about an Upstash Kafka cluster" - long = short + "\n" - - usage = "status [name]" - ) - - cmd := command.New(usage, short, long, runStatus, - command.RequireSession, command.LoadAppNameIfPresent, - ) - - cmd.Args = cobra.MaximumNArgs(1) - - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - extensions_core.SharedFlags, - ) - - return cmd -} - -func runStatus(ctx context.Context) (err error) { - io := iostreams.FromContext(ctx) - - extension, app, err := extensions_core.Discover(ctx, gql.AddOnTypeUpstashKafka) - if err != nil { - return err - } - - obj := [][]string{ - { - extension.Name, - extension.Status, - extension.PrimaryRegion, - }, - } - - var cols []string = []string{"Name", "Status", "Region"} - - if app != nil { - obj[0] = append(obj[0], app.Name) - cols = append(cols, "App") - } - - if err = render.VerticalTable(io.Out, "Status", obj, cols...); err != nil { - return - } - return -} diff --git a/internal/command/extensions/kafka/update.go b/internal/command/extensions/kafka/update.go deleted file mode 100644 index 17a3d069f4..0000000000 --- a/internal/command/extensions/kafka/update.go +++ /dev/null @@ -1,51 +0,0 @@ -package kafka - -import ( - "context" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" -) - -func update() (cmd *cobra.Command) { - const ( - short = "Update an existing Upstash Kafka cluster" - long = short + "\n" - ) - - cmd = command.New("update ", short, long, runUpdate, command.RequireSession, command.LoadAppNameIfPresent) - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - extensions_core.SharedFlags, - SharedFlags, - ) - return cmd -} - -func runUpdate(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx).GenqClient() - - id := flag.FirstArg(ctx) - response, err := gql.GetAddOn(ctx, client, id, string(gql.AddOnTypeUpstashKafka)) - if err != nil { - return - } - addOn := response.AddOn - - options, _ := addOn.Options.(map[string]interface{}) - if options == nil { - options = make(map[string]interface{}) - } - - _, err = gql.UpdateAddOn(ctx, client, addOn.Id, addOn.AddOnPlan.Id, []string{}, options, addOn.Metadata) - if err != nil { - return - } - return runStatus(ctx) -} diff --git a/internal/command/extensions/kubernetes/destroy.go b/internal/command/extensions/kubernetes/destroy.go index a21a7d62e3..f5f5ae04b5 100644 --- a/internal/command/extensions/kubernetes/destroy.go +++ b/internal/command/extensions/kubernetes/destroy.go @@ -66,7 +66,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeKubernetes)) if err != nil { return diff --git a/internal/command/extensions/kubernetes/kubeconfig.go 
b/internal/command/extensions/kubernetes/kubeconfig.go index dec94e865b..b0ea15039a 100644 --- a/internal/command/extensions/kubernetes/kubeconfig.go +++ b/internal/command/extensions/kubernetes/kubeconfig.go @@ -42,7 +42,10 @@ func runSaveKubeconfig(ctx context.Context) error { } metadata := resp.AddOn.Metadata.(map[string]interface{}) - kubeconfig := metadata["kubeconfig"].(string) + kubeconfig, ok := metadata["kubeconfig"].(string) + if !ok { + return fmt.Errorf("Failed to fetch kubeconfig. If provisioning your cluster failed you may have to delete it and reprovision it.") + } outFilename := flag.GetString(ctx, "output") if outFilename == "" { diff --git a/internal/command/extensions/sentry/create.go b/internal/command/extensions/sentry/create.go index 197bfa90bf..621dae4a1f 100644 --- a/internal/command/extensions/sentry/create.go +++ b/internal/command/extensions/sentry/create.go @@ -36,7 +36,11 @@ func runSentryCreate(ctx context.Context) (err error) { Provider: "sentry", }) if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return } diff --git a/internal/command/extensions/sentry/destroy.go b/internal/command/extensions/sentry/destroy.go index 4a79c8343b..02d6092e52 100644 --- a/internal/command/extensions/sentry/destroy.go +++ b/internal/command/extensions/sentry/destroy.go @@ -67,7 +67,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeSentry)) if err != nil { return diff --git a/internal/command/extensions/supabase/create.go b/internal/command/extensions/supabase/create.go deleted file mode 100644 index 2d5915071a..0000000000 --- a/internal/command/extensions/supabase/create.go +++ /dev/null @@ -1,115 +0,0 @@ -package supabase - -import ( - "context" - "fmt" - "regexp" - "strings" - - "github.com/spf13/cobra" - "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/appconfig" - "github.com/superfly/flyctl/internal/command" - extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/command/orgs" - "github.com/superfly/flyctl/internal/command/secrets" - "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/prompt" - "github.com/superfly/flyctl/iostreams" -) - -func create() (cmd *cobra.Command) { - - const ( - short = "Provision a Supabase Postgres database" - long = short + "\n" - ) - - cmd = command.New("create", short, long, runCreate, command.RequireSession, command.LoadAppNameIfPresent) - flag.Add(cmd, - flag.App(), - flag.AppConfig(), - flag.Org(), - flag.Region(), - extensions_core.SharedFlags, - flag.String{ - Name: "name", - Shorthand: "n", - Description: "The name of your database", - }, - ) - return cmd -} - -func CaptureFreeLimitError(ctx context.Context, provisioningError error, params *extensions_core.ExtensionParams) error { - io := iostreams.FromContext(ctx) - - if provisioningError != nil && strings.Contains(provisioningError.Error(), "limited to one") { - - pattern := `named\s+'([^']*)'` - - // Compile the regular expression - re := regexp.MustCompile(pattern) - - // Find all matches - matches := re.FindAllStringSubmatch(provisioningError.Error(), -1) - - var orgName 
string - - if len(matches) > 0 && len(matches[0]) > 1 { - orgName = matches[0][1] - } else { - fmt.Println("No match found") - } - - fmt.Fprintf(io.Out, "\nYou're limited to one free Supabase database through Fly.io, across all orgs. Your org '%s' already has a free database.\n\nTo provision another, you can upgrade the '%s' organization to the $25/mo Pro Plan. Get pricing details at https://supabase.com/docs/guides/platform/org-based-billing.\n\n", orgName, params.Organization.Name) - confirm, err := prompt.Confirm(ctx, fmt.Sprintf("Would you like to upgrade your Supabase org '%s' now ($25/mo, prorated) and launch a database?", params.Organization.Name)) - - if err != nil { - return err - } - - if confirm { - params.OrganizationPlanID = "pro" - _, err := extensions_core.ProvisionExtension(ctx, *params) - - if err != nil { - return err - } - } - } - - return provisioningError -} - -func runCreate(ctx context.Context) (err error) { - appName := appconfig.NameFromContext(ctx) - - params := extensions_core.ExtensionParams{} - - if appName != "" { - params.AppName = appName - } else { - org, err := orgs.OrgFromFlagOrSelect(ctx) - - if err != nil { - return err - } - - params.Organization = org - } - - params.Provider = "supabase" - params.ErrorCaptureCallback = CaptureFreeLimitError - extension, err := extensions_core.ProvisionExtension(ctx, params) - - if err != nil { - return err - } - - if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) - } - - return -} diff --git a/internal/command/extensions/supabase/destroy.go b/internal/command/extensions/supabase/destroy.go index a550f592e1..98ca08714b 100644 --- a/internal/command/extensions/supabase/destroy.go +++ b/internal/command/extensions/supabase/destroy.go @@ -67,7 +67,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeSupabase)) if err != nil { return diff --git a/internal/command/extensions/supabase/supabase.go b/internal/command/extensions/supabase/supabase.go index 9a330a7966..440fb7ea0a 100644 --- a/internal/command/extensions/supabase/supabase.go +++ b/internal/command/extensions/supabase/supabase.go @@ -13,7 +13,7 @@ func New() (cmd *cobra.Command) { ) cmd = command.New("supabase", short, long, nil) - cmd.AddCommand(create(), destroy(), dashboard(), list(), status()) + cmd.AddCommand(destroy(), dashboard(), list(), status()) return cmd } diff --git a/internal/command/extensions/tigris/create.go b/internal/command/extensions/tigris/create.go index e462e69af7..cc6378cb1e 100644 --- a/internal/command/extensions/tigris/create.go +++ b/internal/command/extensions/tigris/create.go @@ -12,6 +12,7 @@ import ( "github.com/superfly/flyctl/internal/command/orgs" "github.com/superfly/flyctl/internal/command/secrets" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" ) func create() (cmd *cobra.Command) { @@ -96,7 +97,15 @@ func runCreate(ctx context.Context) (err error) { } if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + ctx, _, _, err = flapsutil.SetClient(ctx, nil, extension.App.Name) + if err != nil { + return err + } + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return err diff --git 
a/internal/command/extensions/tigris/destroy.go b/internal/command/extensions/tigris/destroy.go index fb9677d3ca..dd4aa2d136 100644 --- a/internal/command/extensions/tigris/destroy.go +++ b/internal/command/extensions/tigris/destroy.go @@ -67,7 +67,7 @@ func runDestroy(ctx context.Context) (err error) { client = flyutil.ClientFromContext(ctx).GenqClient() ) - _, err = gql.DeleteAddOn(ctx, client, extension.Name) + _, err = gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeTigris)) if err != nil { return diff --git a/internal/command/extensions/tigris/status.go b/internal/command/extensions/tigris/status.go index d4dab6a4cf..4d7e5a7504 100644 --- a/internal/command/extensions/tigris/status.go +++ b/internal/command/extensions/tigris/status.go @@ -52,7 +52,7 @@ func runStatus(ctx context.Context) (err error) { }, } - var cols []string = []string{"Name", "Status"} + var cols = []string{"Name", "Status"} optionKeys := []string{"public", "shadow_bucket.write_through", "shadow_bucket.name", "shadow_bucket.endpoint"} @@ -86,7 +86,7 @@ func runStatus(ctx context.Context) (err error) { } } obj[0] = append(obj[0], value) - colName := strings.Title(strings.Replace(strings.Join(keys, " "), "_", " ", -1)) + colName := strings.Title(strings.ReplaceAll(strings.Join(keys, " "), "_", " ")) cols = append(cols, colName) } diff --git a/internal/command/extensions/tigris/update.go b/internal/command/extensions/tigris/update.go index 61d9b59bb1..1729372f6f 100644 --- a/internal/command/extensions/tigris/update.go +++ b/internal/command/extensions/tigris/update.go @@ -30,7 +30,6 @@ func update() (cmd *cobra.Command) { flag.String{ Name: "custom-domain", Description: "A custom domain name pointing at your bucket", - Hidden: true, }, flag.Bool{ @@ -41,7 +40,6 @@ func update() (cmd *cobra.Command) { flag.Bool{ Name: "clear-custom-domain", Description: "Remove a custom domain from a bucket", - Hidden: true, }, flag.Bool{ @@ -122,13 +120,18 @@ func runUpdate(ctx context.Context) (err error) { if flag.IsSpecified(ctx, "custom-domain") { domain := flag.GetString(ctx, "custom-domain") - - if domain != addOn.Name { - return fmt.Errorf("The custom domain must match the bucket name: %s != %s", domain, addOn.Name) + if len(domain) > 0 && flag.GetBool(ctx, "clear-custom-domain") { + return fmt.Errorf("You cannot specify both --custom-domain and --clear-custom-domain") } - fmt.Fprintf(io.Out, "Before continuing, set a DNS CNAME record to enable your custom domain: %s -> %s\n\n", domain, addOn.Name+".fly.storage.tigris.dev") - confirm, err := prompt.Confirm(ctx, "Continue with the update?") + confirm := false + if !flag.GetYes(ctx) { + fmt.Fprintf(io.Out, "Before continuing, set a DNS CNAME record to enable your custom domain: %s -> %s\n\n", domain, addOn.Name+".fly.storage.tigris.dev") + confirm, err = prompt.Confirm(ctx, "Continue with the update?") + } else { + fmt.Fprintf(io.Out, "By specifying the --yes flag you have agreed to set a DNS CNAME record to enable your custom domain: %s -> %s\n\n", domain, addOn.Name+".fly.storage.tigris.dev") + confirm = true + } if err != nil || !confirm { return err diff --git a/internal/command/extensions/vector/create.go b/internal/command/extensions/vector/create.go index c42a9aed94..870513df12 100644 --- a/internal/command/extensions/vector/create.go +++ b/internal/command/extensions/vector/create.go @@ -97,7 +97,7 @@ func runCreate(ctx context.Context) (err error) { return err } - var defaultDimensionCount int = 128 + var defaultDimensionCount = 128 var options = 
gql.AddOnOptions{ "similarity_function": function.Identifier, @@ -120,7 +120,11 @@ func runCreate(ctx context.Context) (err error) { } if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return err diff --git a/internal/command/extensions/vector/destroy.go b/internal/command/extensions/vector/destroy.go index 0c30b2a414..116bd49ba6 100644 --- a/internal/command/extensions/vector/destroy.go +++ b/internal/command/extensions/vector/destroy.go @@ -61,7 +61,7 @@ func runDestroy(ctx context.Context) (err error) { } client := flyutil.ClientFromContext(ctx).GenqClient() - if _, err := gql.DeleteAddOn(ctx, client, extension.Name); err != nil { + if _, err := gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeUpstashVector)); err != nil { return err } diff --git a/internal/command/extensions/vector/status.go b/internal/command/extensions/vector/status.go index a144a3d8f2..ce429e5232 100644 --- a/internal/command/extensions/vector/status.go +++ b/internal/command/extensions/vector/status.go @@ -51,7 +51,7 @@ func runStatus(ctx context.Context) (err error) { }, } - var cols []string = []string{"Name", "Status", "Region"} + var cols = []string{"Name", "Status", "Region"} if app != nil { obj[0] = append(obj[0], app.Name) diff --git a/internal/command/extensions/wafris/create.go b/internal/command/extensions/wafris/create.go index 5d4274202f..b8bce0da32 100644 --- a/internal/command/extensions/wafris/create.go +++ b/internal/command/extensions/wafris/create.go @@ -42,7 +42,11 @@ func runCreate(ctx context.Context) (err error) { }) if extension.SetsSecrets { - err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false) + err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }) } return err diff --git a/internal/command/extensions/wafris/destroy.go b/internal/command/extensions/wafris/destroy.go index 699699d363..20b5e3f571 100644 --- a/internal/command/extensions/wafris/destroy.go +++ b/internal/command/extensions/wafris/destroy.go @@ -61,7 +61,7 @@ func runDestroy(ctx context.Context) (err error) { } client := flyutil.ClientFromContext(ctx).GenqClient() - if _, err := gql.DeleteAddOn(ctx, client, extension.Name); err != nil { + if _, err := gql.DeleteAddOn(ctx, client, extension.Name, string(gql.AddOnTypeWafris)); err != nil { return err } diff --git a/internal/command/image/show.go b/internal/command/image/show.go index 885128c1f0..ee927e2ca6 100644 --- a/internal/command/image/show.go +++ b/internal/command/image/show.go @@ -152,7 +152,7 @@ func showMachineImage(ctx context.Context, app *fly.AppCompact) error { for _, machine := range machines { image := fmt.Sprintf("%s:%s", machine.ImageRef.Repository, machine.ImageRef.Tag) - latestImage, err := client.GetLatestImageDetails(ctx, image) + latestImage, err := client.GetLatestImageDetails(ctx, image, machine.ImageVersion()) if err != nil && strings.Contains(err.Error(), "Unknown repository") { continue diff --git a/internal/command/image/update.go b/internal/command/image/update.go index 87a8af4c66..18ce494004 100644 --- a/internal/command/image/update.go +++ b/internal/command/image/update.go @@ -38,7 +38,7 @@ The update will perform a rolling restart against each Machine, which may result }, flag.Bool{ Name: 
"skip-health-checks", - Description: "Skip waiting for health checks inbetween VM updates.", + Description: "Skip waiting for health checks between VM updates.", Default: false, }, ) diff --git a/internal/command/image/update_machines.go b/internal/command/image/update_machines.go index 47c2f2e2ee..0577e403da 100644 --- a/internal/command/image/update_machines.go +++ b/internal/command/image/update_machines.go @@ -8,6 +8,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/flypg" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flyutil" mach "github.com/superfly/flyctl/internal/machine" @@ -56,11 +57,16 @@ func updateImageForMachines(ctx context.Context, app *fly.AppCompact) error { eligible[machine] = *machineConf } + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return err + } for machine, machineConf := range eligible { input := &fly.LaunchMachineInput{ - Region: machine.Region, - Config: &machineConf, - SkipHealthChecks: skipHealthChecks, + Region: machine.Region, + Config: &machineConf, + SkipHealthChecks: skipHealthChecks, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return err @@ -160,12 +166,19 @@ func updatePostgresOnMachines(ctx context.Context, app *fly.AppCompact) (err err } } + // XXX TODO: use case to think of here is that the machine wasnt provisioned with flyctl. + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return err + } + // Update replicas for _, member := range members["replica"] { machine := member.Machine input := &fly.LaunchMachineInput{ - Region: machine.Region, - Config: &member.TargetConfig, + Region: machine.Region, + Config: &member.TargetConfig, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return err @@ -176,9 +189,10 @@ func updatePostgresOnMachines(ctx context.Context, app *fly.AppCompact) (err err for _, member := range members["barman"] { machine := member.Machine input := &fly.LaunchMachineInput{ - Region: machine.Region, - Config: &member.TargetConfig, - SkipHealthChecks: true, + Region: machine.Region, + Config: &member.TargetConfig, + SkipHealthChecks: true, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return err @@ -191,8 +205,9 @@ func updatePostgresOnMachines(ctx context.Context, app *fly.AppCompact) (err err machine := primary.Machine input := &fly.LaunchMachineInput{ - Region: machine.Region, - Config: &primary.TargetConfig, + Region: machine.Region, + Config: &primary.TargetConfig, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return err @@ -225,8 +240,9 @@ func updatePostgresOnMachines(ctx context.Context, app *fly.AppCompact) (err err // Update leader input := &fly.LaunchMachineInput{ - Region: machine.Region, - Config: &leader.TargetConfig, + Region: machine.Region, + Config: &leader.TargetConfig, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return err @@ -263,7 +279,7 @@ func resolveImage(ctx context.Context, machine fly.Machine) (string, error) { if image == "" { ref := fmt.Sprintf("%s:%s", machine.ImageRef.Repository, machine.ImageRef.Tag) - latestImage, err := client.GetLatestImageDetails(ctx, ref) + latestImage, err := client.GetLatestImageDetails(ctx, ref, machine.ImageVersion()) if err != nil && !strings.Contains(err.Error(), "Unknown 
repository") { return "", err } diff --git a/internal/command/ips/allocate_interactive.go b/internal/command/ips/allocate_interactive.go new file mode 100644 index 0000000000..53f1a9c02c --- /dev/null +++ b/internal/command/ips/allocate_interactive.go @@ -0,0 +1,274 @@ +package ips + +import ( + "context" + "fmt" + "reflect" + + "github.com/spf13/cobra" + fly "github.com/superfly/fly-go" + "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/machine" + "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/iostreams" +) + +func newAllocate() *cobra.Command { + const ( + long = `Allocate recommended IP addresses for the application` + short = `Allocate recommended IP addresses` + ) + + cmd := command.New("allocate", short, long, runAllocateInteractive, + command.RequireSession, + command.RequireAppName, + ) + + flag.Add(cmd, + flag.App(), + flag.AppConfig(), + flag.Region(), + ) + + return cmd +} + +func determineIPTypeFromDeployedServices(ctx context.Context, appName string) (requiresDedicated bool, hasServices bool, hasUDP bool, err error) { + flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ + AppName: appName, + }) + if err != nil { + return false, false, false, fmt.Errorf("could not create flaps client: %w", err) + } + ctx = flapsutil.NewContextWithClient(ctx, flapsClient) + + machines, err := machine.ListActive(ctx) + if err != nil { + return false, false, false, fmt.Errorf("could not list machines: %w", err) + } + + if len(machines) == 0 { + return false, false, false, nil + } + + hasServices = false + hasUDP = false + requiresDedicated = false + + for _, machine := range machines { + if machine.Config == nil { + continue + } + + for _, service := range machine.Config.Services { + hasServices = true + + switch service.Protocol { + case "udp": + hasUDP = true + case "tcp": + for _, port := range service.Ports { + if port.HasNonHttpPorts() { + requiresDedicated = true + } else if port.ContainsPort(80) && !reflect.DeepEqual(port.Handlers, []string{"http"}) { + requiresDedicated = true + } else if port.ContainsPort(443) && !(reflect.DeepEqual(port.Handlers, []string{"http", "tls"}) || reflect.DeepEqual(port.Handlers, []string{"tls", "http"})) { + requiresDedicated = true + } + } + } + } + } + + return requiresDedicated, hasServices, hasUDP, nil +} + +func runAllocateInteractive(ctx context.Context) error { + client := flyutil.ClientFromContext(ctx) + appName := appconfig.NameFromContext(ctx) + io := iostreams.FromContext(ctx) + colorize := io.ColorScheme() + + requiresDedicated, hasServices, hasUDP, err := determineIPTypeFromDeployedServices(ctx, appName) + if err != nil { + return fmt.Errorf("failed to check deployed services: %w", err) + } + + if !hasServices { + fmt.Fprintln(io.Out, "No services are currently deployed on this app.") + fmt.Fprintln(io.Out, "IP addresses are only needed if you have services with external ports configured.") + + confirmed, err := prompt.Confirm(ctx, "Would you like to allocate IP addresses anyway?") + if err != nil { + if prompt.IsNonInteractive(err) { + return prompt.NonInteractiveError("use fly ips allocate-v4 or fly ips allocate-v6 in non-interactive mode") + } + return err + } + if !confirmed { + return nil + } + } + + existingIPs, err := 
client.GetIPAddresses(ctx, appName) + if err != nil { + return fmt.Errorf("failed to get existing IP addresses: %w", err) + } + + hasV4 := false + hasSharedV4 := false + hasV6 := false + for _, ip := range existingIPs { + if ip.Type == "v4" { + hasV4 = true + } + if ip.Type == "shared_v4" { + hasSharedV4 = true + } + if ip.Type == "v6" { + hasV6 = true + } + } + + if len(existingIPs) > 0 { + fmt.Fprint(io.Out, "Your app already has the following IP addresses:\n\n") + renderListTable(ctx, existingIPs) + } + + recommendDedicated := requiresDedicated && hasSharedV4 && !hasV4 + if (hasV4 || hasSharedV4) && hasV6 && !recommendDedicated { + fmt.Fprintln(io.Out, "Your app has all necessary IP addresses.") + fmt.Fprintln(io.Out, "To allocate more addresses, run:") + fmt.Fprintf(io.Out, " %s (dedicated IPv4)\n", colorize.Bold("fly ips allocate-v4")) + if !hasSharedV4 { + fmt.Fprintf(io.Out, " %s (shared IPv4)\n", colorize.Bold("fly ips allocate-v4 --shared")) + } + fmt.Fprintf(io.Out, " %s (dedicated IPv6)\n", colorize.Bold("fly ips allocate-v6")) + fmt.Fprintf(io.Out, " %s (private IPv6)\n", colorize.Bold("fly ips allocate-v6 --private")) + return nil + } + + allocateV6 := false + allocateSharedV4 := false + allocateDedicatedV4 := false + msg := "" + + if recommendDedicated { + msg = `Your app has a service that requires a dedicated IPv4 address, but you currently have a shared IPv4. +Would you like to allocate a dedicated IPv4 address? + IPv4: Dedicated ($2/mo)` + + allocateDedicatedV4 = true + } else if hasUDP && !hasV4 { + msg = `Your app has a UDP service that requires a dedicated IPv4 address. +Would you like to allocate the following addresses? + IPv4: Dedicated ($2/mo) + IPv6: None (Fly.io does not support UDP over public IPv6)` + + allocateDedicatedV4 = true + } else if !hasV4 && !hasV6 && !hasSharedV4 { + if requiresDedicated { + msg = `Your app has a service that requires a dedicated IPv4 address. +Would you like to allocate the following addresses? + IPv4: Dedicated ($2/mo) + IPv6: Dedicated (no charge)` + + allocateDedicatedV4 = true + allocateV6 = true + } else { + msg = `Would you like to allocate the following addresses? + IPv4: Shared (no charge) + IPv6: Dedicated (no charge)` + + allocateSharedV4 = true + allocateV6 = true + } + } else if !hasV4 && !hasSharedV4 { + if requiresDedicated { + msg = `Your app has a service that requires a dedicated IPv4 address. +Would you like to allocate the following address? + IPv4: Dedicated ($2/mo)` + + allocateDedicatedV4 = true + } else { + msg = `Would you like to allocate the following address? + IPv4: Shared (no charge)` + + allocateSharedV4 = true + } + } else if !hasV6 { + msg = `Would you like to allocate the following address? 
+ IPv6: Dedicated (no charge)` + + allocateV6 = true + } + + if len(msg) == 0 { + return nil + } + + confirmed, err := prompt.Confirm(ctx, msg) + if err != nil { + if prompt.IsNonInteractive(err) { + return prompt.NonInteractiveError("use fly ips allocate-v4 or fly ips allocate-v6 in non-interactive mode") + } + return err + } + + if !confirmed { + fmt.Fprintln(io.Out, "\nTo customize your IP allocations, run:") + fmt.Fprintf(io.Out, " %s (dedicated IPv4)\n", colorize.Bold("fly ips allocate-v4")) + if !hasSharedV4 { + fmt.Fprintf(io.Out, " %s (shared IPv4)\n", colorize.Bold("fly ips allocate-v4 --shared")) + } + fmt.Fprintf(io.Out, " %s (dedicated IPv6)\n", colorize.Bold("fly ips allocate-v6")) + fmt.Fprintf(io.Out, " %s (private IPv6)\n", colorize.Bold("fly ips allocate-v6 --private")) + return nil + } + fmt.Fprintln(io.Out, "") + + if allocateSharedV4 { + fmt.Fprintln(io.Out, "Allocating shared IPv4...") + ipAddress, err := client.AllocateSharedIPAddress(ctx, appName) + if err != nil { + return err + } + + renderSharedTable(ctx, ipAddress) + } + + if allocateDedicatedV4 { + fmt.Fprintln(io.Out, "Allocating dedicated IPv4...") + region := flag.GetRegion(ctx) + ipAddress, err := client.AllocateIPAddress(ctx, appName, "v4", region, nil, "") + if err != nil { + return fmt.Errorf("failed to allocate dedicated IPv4: %w", err) + } + + ipAddresses := []fly.IPAddress{*ipAddress} + renderListTable(ctx, ipAddresses) + } + + if allocateV6 { + fmt.Fprintln(io.Out, "Allocating IPv6...") + region := flag.GetRegion(ctx) + ipAddress, err := client.AllocateIPAddress(ctx, appName, "v6", region, nil, "") + if err != nil { + return fmt.Errorf("failed to allocate IPv6: %w", err) + } + + ipAddresses := []fly.IPAddress{*ipAddress} + renderListTable(ctx, ipAddresses) + } + + if allocateSharedV4 && !hasV4 { + fmt.Fprintf(io.Out, "Note: You've been allocated a shared IPv4 address. 
To get a dedicated IPv4 address, run: %s\n", colorize.Bold("fly ips allocate-v4")) + } + + return nil +} diff --git a/internal/command/ips/ips.go b/internal/command/ips/ips.go index 3e991da797..baaa59f1de 100644 --- a/internal/command/ips/ips.go +++ b/internal/command/ips/ips.go @@ -16,6 +16,7 @@ func New() *cobra.Command { cmd.Aliases = []string{"ip"} cmd.AddCommand( newList(), + newAllocate(), newAllocatev4(), newAllocatev6(), newPrivate(), diff --git a/internal/command/ips/private.go b/internal/command/ips/private.go index e8c9f522c9..01e44b620a 100644 --- a/internal/command/ips/private.go +++ b/internal/command/ips/private.go @@ -8,7 +8,10 @@ import ( "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/flagnames" "github.com/superfly/flyctl/internal/flapsutil" + "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/iostreams" ) func newPrivate() *cobra.Command { @@ -44,7 +47,21 @@ func runPrivateIPAddressesList(ctx context.Context) error { if err != nil { return err } - renderPrivateTableMachines(ctx, machines) + + if flag.GetBool(ctx, flagnames.JSONOutput) { + privateIpAddresses := make([]string, 0, len(machines)) + + for _, machine := range machines { + if machine.PrivateIP != "" { + privateIpAddresses = append(privateIpAddresses, machine.PrivateIP) + } + } + + out := iostreams.FromContext(ctx).Out + return render.JSON(out, privateIpAddresses) + } else { + renderPrivateTableMachines(ctx, machines) + } return nil } diff --git a/internal/command/launch/cmd.go b/internal/command/launch/cmd.go index 763de8531e..345a0f0b93 100644 --- a/internal/command/launch/cmd.go +++ b/internal/command/launch/cmd.go @@ -16,13 +16,15 @@ import ( "github.com/samber/lo" "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/deploy" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/env" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/validation" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyerr" - "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/metrics" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/internal/state" @@ -37,10 +39,10 @@ func New() (cmd *cobra.Command) { short = `Create and configure a new app from source code or a Docker image` ) - cmd = command.New("launch", short, long, run, command.RequireSession, command.LoadAppConfigIfPresent) + cmd = command.New("launch", short, long, run, command.RequireSession, command.RequireUiex, command.LoadAppConfigIfPresent) cmd.Args = cobra.NoArgs - flag.Add(cmd, + flags := []flag.Flag{ // Since launch can perform a deployment, we offer the full set of deployment flags for those using // the launch command in CI environments. We may want to rescind this decision down the line, because // the list of flags is long, but it follows from the precedent of already offering some deployment flags. 
@@ -128,6 +130,11 @@ func New() (cmd *cobra.Command) {
 Description: "Skip automatically provisioning an object storage bucket",
 Default: false,
 },
+ flag.Bool{
+ Name: "no-github-workflow",
+ Description: "Skip automatically provisioning a GitHub fly deploy workflow",
+ Default: false,
+ },
 flag.Bool{
 Name: "json",
 Description: "Generate configuration in JSON format",
@@ -150,7 +157,32 @@ func New() (cmd *cobra.Command) {
 Default: false,
 Hidden: true,
 },
- )
+ flag.String{
+ Name: "auto-stop",
+ Description: "Automatically suspend the app after a period of inactivity. Valid values are 'off', 'stop', and 'suspend'",
+ Default: "stop",
+ },
+ flag.String{
+ Name: "command",
+ Description: "The command to override the Docker CMD.",
+ },
+ flag.StringSlice{
+ Name: "volume",
+ Shorthand: "v",
+ Description: "Volume to mount, in the form of <volume_name>:/path/inside/machine[:<options>]",
+ },
+ flag.StringArray{
+ Name: "secret",
+ Description: "Set of secrets in the form of NAME=VALUE pairs. Can be specified multiple times.",
+ },
+ flag.String{
+ Name: "db",
+ Description: "Provision a Postgres database. Options: mpg (managed postgres), upg/legacy (unmanaged postgres), or true (default type)",
+ NoOptDefVal: "true",
+ },
+ }
+
+ flag.Add(cmd, flags...)
 cmd.AddCommand(newSessions())
 cmd.AddCommand(NewPlan())
@@ -256,7 +288,11 @@ func run(ctx context.Context) (err error) {
 return err
 }
- defer tp.Shutdown(ctx)
+ defer func() {
+ shutdownCtx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+ tp.Shutdown(shutdownCtx)
+ }()
 ctx, span := tracing.CMDSpan(ctx, "cmd.launch")
 defer span.End()
@@ -288,6 +324,19 @@ func run(ctx context.Context) (err error) {
 return err
 }
+ // Validate conflicting postgres flags
+ if err := validatePostgresFlags(ctx); err != nil {
+ return err
+ }
+
+ if err := validation.ValidateCompressionFlag(flag.GetString(ctx, "compression")); err != nil {
+ return err
+ }
+
+ if err := validation.ValidateCompressionLevelFlag(flag.GetInt(ctx, "compression-level")); err != nil {
+ return err
+ }
+
 var (
 launchManifest *LaunchManifest
 cache *planBuildCache
@@ -354,6 +403,11 @@ func run(ctx context.Context) (err error) {
 }
 }
+ // Override internal port if requested using --internal-port flag
+ if n := flag.GetInt(ctx, "internal-port"); n > 0 {
+ launchManifest.Plan.HttpServicePort = n
+ }
+
 span.SetAttributes(attribute.String("app.name", launchManifest.Plan.AppName))
 status.AppName = launchManifest.Plan.AppName
@@ -368,7 +422,7 @@ func run(ctx context.Context) (err error) {
 status.VM.ProcessN = len(vm.Processes)
 }
- status.HasPostgres = launchManifest.Plan.Postgres.FlyPostgres != nil
+ status.HasPostgres = launchManifest.Plan.Postgres.FlyPostgres != nil || launchManifest.Plan.Postgres.SupabasePostgres != nil || launchManifest.Plan.Postgres.ManagedPostgres != nil
 status.HasRedis = launchManifest.Plan.Redis.UpstashRedis != nil
 status.HasSentry = launchManifest.Plan.Sentry
@@ -449,8 +503,13 @@ func run(ctx context.Context) (err error) {
 exports[name] = strings.ReplaceAll(secret, "${FLYCAST_URL}", flycast)
 }
- apiClient := flyutil.ClientFromContext(parentCtx)
- _, err := apiClient.SetSecrets(parentCtx, parentConfig.AppName, exports)
+ // This might be duplicate work? Is there a saner place to build the client and stash it in the context?
+ parentCtx, flapsClient, _, err := flapsutil.SetClient(parentCtx, nil, parentConfig.AppName) + if err != nil { + return fmt.Errorf("making client for %s: %w", parentConfig.AppName, err) + } + + err = appsecrets.Update(parentCtx, flapsClient, parentConfig.AppName, exports, nil) if err != nil { return err } @@ -486,3 +545,38 @@ func warnLegacyBehavior(ctx context.Context) error { } return nil } + +// validatePostgresFlags checks for conflicting postgres-related flags +func validatePostgresFlags(ctx context.Context) error { + dbFlag := flag.GetString(ctx, "db") + noDb := flag.GetBool(ctx, "no-db") + + // Normalize db flag values + switch dbFlag { + case "true", "1", "yes": + dbFlag = "true" + case "mpg", "managed": + dbFlag = "mpg" + case "upg", "unmanaged", "legacy": + dbFlag = "upg" + case "false", "0", "no", "": + dbFlag = "" + default: + if dbFlag != "" { + return flyerr.GenericErr{ + Err: fmt.Sprintf("Invalid value '%s' for --db flag", dbFlag), + Suggest: "Valid options: mpg (managed postgres), upg/legacy (unmanaged postgres), or true (default type)", + } + } + } + + // Check if db flag conflicts with --no-db + if dbFlag != "" && noDb { + return flyerr.GenericErr{ + Err: "Cannot specify both --db and --no-db", + Suggest: "Remove either --db or --no-db", + } + } + + return nil +} diff --git a/internal/command/launch/cmd_test.go b/internal/command/launch/cmd_test.go new file mode 100644 index 0000000000..4634325dbf --- /dev/null +++ b/internal/command/launch/cmd_test.go @@ -0,0 +1,234 @@ +package launch + +import ( + "context" + "strings" + "testing" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/flag/flagctx" + "github.com/superfly/flyctl/iostreams" +) + +func TestValidatePostgresFlags(t *testing.T) { + tests := []struct { + name string + dbFlag string + noDbFlag bool + expectError bool + errorMsg string + }{ + { + name: "valid mpg flag", + dbFlag: "mpg", + noDbFlag: false, + expectError: false, + }, + { + name: "valid upg flag", + dbFlag: "upg", + noDbFlag: false, + expectError: false, + }, + { + name: "valid legacy flag", + dbFlag: "legacy", + noDbFlag: false, + expectError: false, + }, + { + name: "valid true flag", + dbFlag: "true", + noDbFlag: false, + expectError: false, + }, + { + name: "valid empty flag", + dbFlag: "", + noDbFlag: false, + expectError: false, + }, + { + name: "invalid flag value", + dbFlag: "invalid", + noDbFlag: false, + expectError: true, + errorMsg: "Invalid value 'invalid' for --db flag", + }, + { + name: "conflicting db and no-db", + dbFlag: "mpg", + noDbFlag: true, + expectError: true, + errorMsg: "Cannot specify both --db and --no-db", + }, + { + name: "conflicting upg and no-db", + dbFlag: "upg", + noDbFlag: true, + expectError: true, + errorMsg: "Cannot specify both --db and --no-db", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a context with iostreams + ctx := context.Background() + ctx = iostreams.NewContext(ctx, iostreams.System()) + + // Create a test context with flags + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.String("db", tt.dbFlag, "") + flagSet.Bool("no-db", tt.noDbFlag, "") + ctx = flagctx.NewContext(ctx, flagSet) + + err := validatePostgresFlags(ctx) + + if tt.expectError { + if err == nil { + t.Errorf("expected error but got none") + return + } + if tt.errorMsg != "" && !strings.Contains(err.Error(), tt.errorMsg) { + 
t.Errorf("expected error message to contain '%s', got '%s'", tt.errorMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("expected no error but got: %v", err) + } + } + }) + } +} + +func TestParseMountOptions(t *testing.T) { + tests := []struct { + name string + options string + expectedMount appconfig.Mount + expectError bool + errMsg string + }{ + { + name: "empty options", + options: "", + expectedMount: appconfig.Mount{}, + }, + { + name: "scheduled_snapshots true", + options: "scheduled_snapshots=true", + expectedMount: appconfig.Mount{ + ScheduledSnapshots: fly.Pointer(true), + }, + }, + { + name: "scheduled_snapshots false", + options: "scheduled_snapshots=false", + expectedMount: appconfig.Mount{ + ScheduledSnapshots: fly.Pointer(false), + }, + }, + { + name: "scheduled_snapshots invalid value", + options: "scheduled_snapshots=invalid", + expectError: true, + errMsg: "invalid value for scheduled_snapshots", + }, + { + name: "snapshot_retention", + options: "snapshot_retention=7", + expectedMount: appconfig.Mount{ + SnapshotRetention: fly.Pointer(7), + }, + }, + { + name: "snapshot_retention invalid", + options: "snapshot_retention=invalid", + expectError: true, + errMsg: "invalid value for snapshot_retention", + }, + { + name: "initial_size", + options: "initial_size=10GB", + expectedMount: appconfig.Mount{ + InitialSize: "10GB", + }, + }, + { + name: "auto_extend_size_threshold", + options: "auto_extend_size_threshold=80", + expectedMount: appconfig.Mount{ + AutoExtendSizeThreshold: 80, + }, + }, + { + name: "auto_extend_size_threshold invalid", + options: "auto_extend_size_threshold=invalid", + expectError: true, + errMsg: "invalid value for auto_extend_size_threshold", + }, + { + name: "auto_extend_size_increment", + options: "auto_extend_size_increment=1GB", + expectedMount: appconfig.Mount{ + AutoExtendSizeIncrement: "1GB", + }, + }, + { + name: "auto_extend_size_limit", + options: "auto_extend_size_limit=100GB", + expectedMount: appconfig.Mount{ + AutoExtendSizeLimit: "100GB", + }, + }, + { + name: "multiple options", + options: "initial_size=10GB,scheduled_snapshots=true,snapshot_retention=14", + expectedMount: appconfig.Mount{ + InitialSize: "10GB", + ScheduledSnapshots: fly.Pointer(true), + SnapshotRetention: fly.Pointer(14), + }, + }, + { + name: "unknown option", + options: "unknown_option=value", + expectError: true, + errMsg: "unknown mount option", + }, + { + name: "invalid format", + options: "invalid_format", + expectError: true, + errMsg: "invalid mount option", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mount := &appconfig.Mount{} + err := ParseMountOptions(mount, tt.options) + + if tt.expectError { + if err == nil { + t.Errorf("expected error but got none") + return + } + if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("expected error message to contain '%s', got '%s'", tt.errMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("expected no error but got: %v", err) + return + } + + assert.Equal(t, tt.expectedMount, *mount) + } + }) + } +} diff --git a/internal/command/launch/deploy.go b/internal/command/launch/deploy.go index f7c2e20dec..91a82dbc31 100644 --- a/internal/command/launch/deploy.go +++ b/internal/command/launch/deploy.go @@ -29,18 +29,10 @@ func (state *launchState) firstDeploy(ctx context.Context) error { // TODO(Allison): Do we want to make the executive decision to just *always* deploy? 
// Feedback(Sam): scanners need the abiiity to abort the deploy if they detect a problem - deployNow := true - // deployNow := false - // promptForDeploy := true - - if state.sourceInfo.SkipDeploy || flag.GetBool(ctx, "no-deploy") { - deployNow = false - // promptForDeploy = false - } + deployNow := !(state.sourceInfo.SkipDeploy || flag.GetBool(ctx, "no-deploy")) if flag.GetBool(ctx, "now") { deployNow = true - // promptForDeploy = false } if flag.GetBool(ctx, "no-create") { diff --git a/internal/command/launch/describe_plan.go b/internal/command/launch/describe_plan.go index 68fad142d2..b613880d95 100644 --- a/internal/command/launch/describe_plan.go +++ b/internal/command/launch/describe_plan.go @@ -6,8 +6,8 @@ import ( "strings" "github.com/samber/lo" - fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/command/launch/plan" + "github.com/superfly/flyctl/internal/command/mpg" "github.com/superfly/flyctl/internal/command/redis" ) @@ -17,29 +17,33 @@ import ( const descriptionNone = "" func describePostgresPlan(launchPlan *plan.LaunchPlan) (string, error) { - switch provider := launchPlan.Postgres.Provider().(type) { case *plan.FlyPostgresPlan: return describeFlyPostgresPlan(provider) case *plan.SupabasePostgresPlan: return describeSupabasePostgresPlan(provider, launchPlan) + case *plan.ManagedPostgresPlan: + return describeManagedPostgresPlan(provider, launchPlan) } return descriptionNone, nil } func describeFlyPostgresPlan(p *plan.FlyPostgresPlan) (string, error) { - - nodePlural := lo.Ternary(p.Nodes == 1, "", "s") - nodesStr := fmt.Sprintf("(Fly Postgres) %d Node%s", p.Nodes, nodePlural) - - guestStr := fly.MachinePresets[p.VmSize].String() - + guestStr := "" + if p.VmRam > 1024 { + guestStr = fmt.Sprintf("%s, %dGB RAM", p.VmSize, p.VmRam/1024) + } else { + guestStr = fmt.Sprintf("%s, %dMB RAM", p.VmSize, p.VmRam) + } diskSizeStr := fmt.Sprintf("%dGB disk", p.DiskSizeGB) - info := []string{nodesStr, guestStr, diskSizeStr} + info := []string{guestStr, diskSizeStr} if p.AutoStop { info = append(info, "auto-stop") } + if p.Price > 0 { + info = append(info, fmt.Sprintf("$%d/mo", p.Price)) + } return strings.Join(info, ", "), nil } @@ -49,18 +53,17 @@ func describeSupabasePostgresPlan(p *plan.SupabasePostgresPlan, launchPlan *plan return fmt.Sprintf("(Supabase) %s in %s", p.GetDbName(launchPlan), p.GetRegion(launchPlan)), nil } -func describeRedisPlan(ctx context.Context, p plan.RedisPlan, org *fly.Organization) (string, error) { +func describeRedisPlan(ctx context.Context, p plan.RedisPlan) (string, error) { switch provider := p.Provider().(type) { case *plan.UpstashRedisPlan: - return describeUpstashRedisPlan(ctx, provider, org) + return describeUpstashRedisPlan(ctx, provider) } return descriptionNone, nil } -func describeUpstashRedisPlan(ctx context.Context, p *plan.UpstashRedisPlan, org *fly.Organization) (string, error) { - - plan, err := redis.DeterminePlan(ctx, org) +func describeUpstashRedisPlan(ctx context.Context, p *plan.UpstashRedisPlan) (string, error) { + plan, err := redis.DeterminePlan(ctx) if err != nil { return "", fmt.Errorf("redis plan not found: %w", err) } @@ -76,3 +79,25 @@ func describeObjectStoragePlan(p plan.ObjectStoragePlan) (string, error) { return "private bucket", nil } + +func describeManagedPostgresPlan(p *plan.ManagedPostgresPlan, launchPlan *plan.LaunchPlan) (string, error) { + info := []string{} + + planDetails, ok := mpg.MPGPlans[p.Plan] + + if p.DbName != "" { + info = append(info, fmt.Sprintf("\"%s\"", p.GetDbName(launchPlan))) + } + 
+ if ok { + info = append(info, fmt.Sprintf("%s plan ($%d/mo)", planDetails.Name, planDetails.PricePerMo)) + } else { + info = append(info, fmt.Sprintf("plan %s", p.Plan)) + } + + if p.Region != "" { + info = append(info, fmt.Sprintf("region %s", p.GetRegion(launchPlan))) + } + + return strings.Join(info, ", "), nil +} diff --git a/internal/command/launch/dockerfiles.go b/internal/command/launch/dockerfiles.go index 9fcc85a4b9..6ee55c6d1f 100644 --- a/internal/command/launch/dockerfiles.go +++ b/internal/command/launch/dockerfiles.go @@ -103,7 +103,7 @@ func createDockerignoreFromGitignores(root string, gitIgnores []string) (string, } else { f.Write(linebreak) } - _, err := f.WriteString(fmt.Sprintf("# flyctl launch added from %s\n", relFile)) + _, err := fmt.Fprintf(f, "# flyctl launch added from %s\n", relFile) if err != nil { return "", err } diff --git a/internal/command/launch/launch.go b/internal/command/launch/launch.go index 52bc26f687..9dc43e2240 100644 --- a/internal/command/launch/launch.go +++ b/internal/command/launch/launch.go @@ -4,11 +4,16 @@ import ( "context" "fmt" "path/filepath" + "regexp" + "strconv" "strings" + "github.com/docker/go-units" fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flag/flagnames" @@ -29,28 +34,31 @@ func (state *launchState) Launch(ctx context.Context) error { return err } - updateConfig(state.Plan, state.env, state.appConfig) + state.updateConfig(ctx, state.Plan, state.env, state.appConfig) if err := state.validateExtensions(ctx); err != nil { return err } - org, err := state.Org(ctx) + org, err := state.orgCompact(ctx) if err != nil { return err } - if !planValidateHighAvailability(ctx, state.Plan, org, !state.warnedNoCcHa) { + if !planValidateHighAvailability(ctx, state.Plan, org.Billable, !state.warnedNoCcHa) { state.Plan.HighAvailability = false state.warnedNoCcHa = true } planStep := plan.GetPlanStep(ctx) + var flapsClient flapsutil.FlapsClient if !flag.GetBool(ctx, "no-create") && (planStep == "" || planStep == "create") { - app, err := state.createApp(ctx) + f, app, err := state.createApp(ctx) if err != nil { return err } + flapsClient = f + fmt.Fprintf(io.Out, "Created app '%s' in organization '%s'\n", app.Name, app.Organization.Slug) fmt.Fprintf(io.Out, "Admin URL: https://fly.io/apps/%s\n", app.Name) fmt.Fprintf(io.Out, "Hostname: %s.fly.dev\n", app.Name) @@ -60,6 +68,14 @@ func (state *launchState) Launch(ctx context.Context) error { } } + if flapsClient == nil { + flapsClient, err = flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{AppName: state.appConfig.AppName}) + if err != nil { + return err + } + } + ctx = flapsutil.NewContextWithClient(ctx, flapsClient) + // TODO: ideally this would be passed as a part of the plan to the Launch UI // and allow choices of what actions are desired to be make there. 
if state.sourceInfo != nil && state.sourceInfo.GitHubActions.Deploy { @@ -109,6 +125,49 @@ func (state *launchState) Launch(ctx context.Context) error { } } + // if the user specified a command, set it in the app config + if flag.GetString(ctx, "command") != "" { + if state.appConfig.Processes == nil { + state.appConfig.Processes = make(map[string]string) + } + + state.appConfig.Processes["app"] = flag.GetString(ctx, "command") + } + + volumes := flag.GetStringSlice(ctx, "volume") + if len(volumes) > 0 { + v := volumes[0] + splittedIDDestOpts := strings.Split(v, ":") + + if len(splittedIDDestOpts) < 2 { + re := regexp.MustCompile(`(?m)^VOLUME\s+(\[\s*")?(\/[\w\/]*?(\w+))("\s*\])?\s*$`) + m := re.FindStringSubmatch(splittedIDDestOpts[0]) + + if len(m) > 0 { + state.appConfig.Mounts = []appconfig.Mount{ + { + Source: m[3], // last part of path + Destination: m[2], // full path + }, + } + } + } else { + // if the user specified a volume, set it in the app config + state.appConfig.Mounts = []appconfig.Mount{ + { + Source: splittedIDDestOpts[0], + Destination: splittedIDDestOpts[1], + }, + } + + if len(splittedIDDestOpts) > 2 { + if err := ParseMountOptions(&state.appConfig.Mounts[0], splittedIDDestOpts[2]); err != nil { + return err + } + } + } + } + // Finally write application configuration to fly.toml configDir, configFile := filepath.Split(state.configPath) configFileOverride := flag.GetString(ctx, flagnames.AppConfigFilePath) @@ -129,7 +188,29 @@ func (state *launchState) Launch(ctx context.Context) error { return err } + // Add secrets to the app + if secretsFlag := flag.GetStringArray(ctx, "secret"); len(secretsFlag) > 0 { + secrets := make(map[string]string, len(secretsFlag)) + for _, secret := range secretsFlag { + kv := strings.SplitN(secret, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("invalid secret format: %s, expected NAME=VALUE", secret) + } + key := strings.TrimSpace(kv[0]) + value := strings.TrimSpace(kv[1]) + secrets[key] = value + } + + if err := appsecrets.Update(ctx, flapsClient, state.appConfig.AppName, secrets, nil); err != nil { + return err + } + } + if state.sourceInfo != nil { + if state.appConfig.Deploy != nil && state.appConfig.Deploy.SeedCommand != "" { + ctx = appconfig.WithSeedCommand(ctx, state.appConfig.Deploy.SeedCommand) + } + if err := state.firstDeploy(ctx); err != nil { return err } @@ -138,6 +219,53 @@ func (state *launchState) Launch(ctx context.Context) error { return nil } +func ParseMountOptions(mount *appconfig.Mount, options string) error { + if options == "" { + return nil + } + + pairs := strings.Split(options, ",") + for _, pair := range pairs { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("invalid mount option: %s", pair) + } + key := strings.TrimSpace(kv[0]) + value := strings.TrimSpace(kv[1]) + + switch key { + case "initial_size": + mount.InitialSize = value + case "snapshot_retention": + ret, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid value for snapshot_retention: %s", value) + } + mount.SnapshotRetention = &ret + case "scheduled_snapshots": + ret, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("invalid value for scheduled_snapshots: %s", value) + } + mount.ScheduledSnapshots = &ret + case "auto_extend_size_threshold": + threshold, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid value for auto_extend_size_threshold: %s", value) + } + mount.AutoExtendSizeThreshold = threshold + case "auto_extend_size_increment": + 
mount.AutoExtendSizeIncrement = value + case "auto_extend_size_limit": + mount.AutoExtendSizeLimit = value + default: + return fmt.Errorf("unknown mount option: %s", key) + } + } + + return nil +} + // Apply the freestanding Guest fields to the appConfig's Compute field // This is temporary, but allows us to start using Compute-based plans in flyctl *now* while the UI catches up in time. func (state *launchState) updateComputeFromDeprecatedGuestFields(ctx context.Context) error { @@ -164,18 +292,47 @@ func (state *launchState) updateComputeFromDeprecatedGuestFields(ctx context.Con } // updateConfig populates the appConfig with the plan's values -func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appconfig.Config) { +// func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appconfig.Config) { +func (state *launchState) updateConfig(ctx context.Context, plan *plan.LaunchPlan, env map[string]string, appConfig *appconfig.Config) { appConfig.AppName = plan.AppName appConfig.PrimaryRegion = plan.RegionCode if env != nil { appConfig.SetEnvVariables(env) } + + appConfig.Compute = plan.Compute + if plan.HttpServicePort != 0 { + autostop := fly.MachineAutostopStop + autostopFlag := flag.GetString(ctx, "auto-stop") + + if autostopFlag == "off" { + autostop = fly.MachineAutostopOff + } else if autostopFlag == "suspend" { + autostop = fly.MachineAutostopSuspend + + // if any compute has a GPU or more than 2GB of memory, set autostop to stop + for _, compute := range state.appConfig.Compute { + if compute.MachineGuest != nil && compute.MachineGuest.GPUKind != "" { + autostop = fly.MachineAutostopStop + break + } + + if compute.Memory != "" { + mb, err := helpers.ParseSize(compute.Memory, units.RAMInBytes, units.MiB) + if err != nil || mb >= 2048 { + autostop = fly.MachineAutostopStop + break + } + } + } + } + if appConfig.HTTPService == nil { appConfig.HTTPService = &appconfig.HTTPService{ ForceHTTPS: true, AutoStartMachines: fly.Pointer(true), - AutoStopMachines: fly.Pointer(fly.MachineAutostopStop), + AutoStopMachines: fly.Pointer(autostop), MinMachinesRunning: fly.Pointer(0), Processes: []string{"app"}, } @@ -184,6 +341,8 @@ func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appco } else { appConfig.HTTPService = nil } + + // helper appConfig.Compute = plan.Compute if plan.CPUKind != "" { @@ -206,28 +365,29 @@ func updateConfig(plan *plan.LaunchPlan, env map[string]string, appConfig *appco } // createApp creates the fly.io app for the plan -func (state *launchState) createApp(ctx context.Context) (*fly.App, error) { +func (state *launchState) createApp(ctx context.Context) (flapsutil.FlapsClient, *fly.App, error) { apiClient := flyutil.ClientFromContext(ctx) - org, err := state.Org(ctx) + + org, err := state.orgCompact(ctx) if err != nil { - return nil, err + return nil, nil, err } app, err := apiClient.CreateApp(ctx, fly.CreateAppInput{ - OrganizationID: org.ID, + OrganizationID: org.Id, Name: state.Plan.AppName, PreferredRegion: &state.Plan.RegionCode, Machines: true, }) if err != nil { - return nil, err + return nil, nil, err } f, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{AppName: app.Name}) if err != nil { - return nil, err + return nil, nil, err } else if err := f.WaitForApp(ctx, app.Name); err != nil { - return nil, err + return nil, nil, err } - return app, nil + return f, app, nil } diff --git a/internal/command/launch/launch_databases.go b/internal/command/launch/launch_databases.go index 
898476eeea..e71f9a18bb 100644 --- a/internal/command/launch/launch_databases.go +++ b/internal/command/launch/launch_databases.go @@ -5,16 +5,21 @@ import ( "fmt" "time" + "github.com/avast/retry-go/v4" "github.com/samber/lo" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/flypg" "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/internal/appsecrets" extensions_core "github.com/superfly/flyctl/internal/command/extensions/core" - "github.com/superfly/flyctl/internal/command/extensions/supabase" "github.com/superfly/flyctl/internal/command/launch/plan" + "github.com/superfly/flyctl/internal/command/mpg" "github.com/superfly/flyctl/internal/command/postgres" "github.com/superfly/flyctl/internal/command/redis" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" "github.com/superfly/flyctl/iostreams" ) @@ -30,14 +35,18 @@ func (state *launchState) createDatabases(ctx context.Context) error { } } - if state.Plan.Postgres.SupabasePostgres != nil && (planStep == "" || planStep == "postgres") { - err := state.createSupabasePostgres(ctx) + if state.Plan.Postgres.ManagedPostgres != nil && (planStep == "" || planStep == "postgres") { + err := state.createManagedPostgres(ctx) if err != nil { // TODO(Ali): Make error printing here better. - fmt.Fprintf(iostreams.FromContext(ctx).ErrOut, "Error provisioning Supabase Postgres database: %s\n", err) + fmt.Fprintf(iostreams.FromContext(ctx).ErrOut, "Error creating Managed Postgres cluster: %s\n", err) } } + if state.Plan.Postgres.SupabasePostgres != nil && (planStep == "" || planStep == "postgres") { + fmt.Fprintf(iostreams.FromContext(ctx).ErrOut, "Supabase Postgres is no longer supported.\n") + } + if state.Plan.Redis.UpstashRedis != nil && (planStep == "" || planStep == "redis") { err := state.createUpstashRedis(ctx) if err != nil { @@ -148,42 +157,217 @@ func (state *launchState) createFlyPostgres(ctx context.Context) error { fmt.Fprintf(io.Out, "Postgres cluster %s is now attached to %s\n", pgPlan.AppName, state.Plan.AppName) } } - if err != nil { const msg = "Error creating Postgres database. 
Be warned that this may affect deploys" fmt.Fprintln(io.Out, io.ColorScheme().Red(msg)) } - - return err } return nil } -func (state *launchState) createSupabasePostgres(ctx context.Context) error { - postgresPlan := state.Plan.Postgres.SupabasePostgres +func (state *launchState) createManagedPostgres(ctx context.Context) error { + var ( + io = iostreams.FromContext(ctx) + pgPlan = state.Plan.Postgres.ManagedPostgres + uiexClient = uiexutil.ClientFromContext(ctx) + ) + // Get org org, err := state.Org(ctx) if err != nil { return err } - params := extensions_core.ExtensionParams{ - AppName: state.Plan.AppName, - Organization: org, - Provider: "supabase", - OverrideName: fly.Pointer(postgresPlan.GetDbName(state.Plan)), - OverrideRegion: postgresPlan.GetRegion(state.Plan), - ErrorCaptureCallback: supabase.CaptureFreeLimitError, + var slug string + if org.Slug == "personal" { + genqClient := flyutil.ClientFromContext(ctx).GenqClient() + + // For ui-ex request we need the real org slug + var fullOrg *gql.GetOrganizationResponse + if fullOrg, err = gql.GetOrganization(ctx, genqClient, org.Slug); err != nil { + return fmt.Errorf("failed fetching org: %w", err) + } + + slug = fullOrg.Organization.RawSlug + } else { + slug = org.Slug } - _, err = extensions_core.ProvisionExtension(ctx, params) + // Create cluster using the same parameters as mpg create + params := &mpg.CreateClusterParams{ + Name: pgPlan.DbName, + OrgSlug: slug, + Region: pgPlan.Region, + Plan: pgPlan.Plan, + VolumeSizeGB: pgPlan.DiskSize, + } + // Create cluster using the UI-EX client with retry logic for network errors + input := uiex.CreateClusterInput{ + Name: params.Name, + Region: params.Region, + Plan: params.Plan, + OrgSlug: params.OrgSlug, + Disk: params.VolumeSizeGB, + } + + fmt.Fprintf(io.Out, "Provisioning Managed Postgres cluster...\n") + + var response uiex.CreateClusterResponse + err = retry.Do( + func() error { + var retryErr error + response, retryErr = uiexClient.CreateCluster(ctx, input) + return retryErr + }, + retry.Context(ctx), + retry.Attempts(3), + retry.Delay(1*time.Second), + retry.DelayType(retry.BackOffDelay), + retry.OnRetry(func(n uint, err error) { + fmt.Fprintf(io.Out, "Retrying cluster creation (attempt %d) due to: %v\n", n+1, err) + }), + ) if err != nil { + return fmt.Errorf("failed creating managed postgres cluster: %w", err) + } + + // Wait for cluster to be ready + fmt.Fprintf(io.Out, "Waiting for cluster %s (%s) to be ready...\n", params.Name, response.Data.Id) + fmt.Fprintf(io.Out, "This may take up to 15 minutes. If this is taking too long, you can press Ctrl+C to continue with deployment.\n") + fmt.Fprintf(io.Out, "You can check the status later with 'fly mpg status' and attach with 'fly mpg attach'.\n") + + // Create a separate context for the wait loop with 15 minute timeout + waitCtx := context.Background() + waitCtx, cancel := context.WithTimeout(waitCtx, 15*time.Minute) + defer cancel() + + // Use retry.Do with a 15-minute timeout and exponential backoff + err = retry.Do( + func() error { + cluster, err := uiexClient.GetManagedClusterById(ctx, response.Data.Id) + if err != nil { + // For network errors, return the error to trigger retry + if containsNetworkError(err.Error()) { + return err + } + // For other errors, make them unrecoverable + return retry.Unrecoverable(fmt.Errorf("failed checking cluster status: %w", err)) + } + + if cluster.Data.Status == "ready" { + return nil // Success! 
+ } + + if cluster.Data.Status == "error" { + return retry.Unrecoverable(fmt.Errorf("cluster creation failed")) + } + + // Return an error to continue retrying if status is not ready + return fmt.Errorf("cluster status is %s, waiting for ready", cluster.Data.Status) + }, + retry.Context(waitCtx), + retry.Attempts(0), // Unlimited attempts within the timeout + retry.Delay(2*time.Second), + retry.MaxDelay(30*time.Second), + retry.DelayType(retry.BackOffDelay), + retry.OnRetry(func(n uint, err error) { + // Log network-related errors and periodic status updates + if containsNetworkError(err.Error()) { + fmt.Fprintf(io.Out, "Retrying status check due to network issue: %v\n", err) + } else if n%10 == 0 && n > 0 { // Log every 10th attempt to show progress + fmt.Fprintf(io.Out, "Still waiting for cluster to be ready (attempt %d)...\n", n+1) + } + }), + ) + + // Handle the result + if err != nil { + // Check if we hit the timeout + if waitCtx.Err() == context.DeadlineExceeded { + fmt.Fprintf(io.Out, "\nCluster creation is taking longer than expected. Continuing with deployment.\n") + fmt.Fprintf(io.Out, "You can check the status later with 'fly mpg status' and attach with 'fly mpg attach'.\n") + return nil + } + // Check if the user cancelled + if ctx.Err() == context.Canceled { + fmt.Fprintf(io.Out, "\nContinuing with deployment. You can check the status later with 'fly mpg status' and attach with 'fly mpg attach'.\n") + return nil + } return err } - return err + // Get the cluster credentials with retry logic + var cluster uiex.GetManagedClusterResponse + err = retry.Do( + func() error { + var retryErr error + cluster, retryErr = uiexClient.GetManagedClusterById(ctx, response.Data.Id) + return retryErr + }, + retry.Context(ctx), + retry.Attempts(3), + retry.Delay(1*time.Second), + retry.DelayType(retry.BackOffDelay), + retry.OnRetry(func(n uint, err error) { + fmt.Fprintf(io.Out, "Retrying credential retrieval (attempt %d) due to: %v\n", n+1, err) + }), + ) + if err != nil { + return fmt.Errorf("failed retrieving cluster credentials: %w", err) + } + + // Set the connection string as a secret + secrets := map[string]string{ + "DATABASE_URL": cluster.Credentials.ConnectionUri, + } + + flapsClient := flapsutil.ClientFromContext(ctx) + if err := appsecrets.Update(ctx, flapsClient, state.Plan.AppName, secrets, nil); err != nil { + return fmt.Errorf("failed setting database secrets: %w", err) + } + + fmt.Fprintf(io.Out, "Managed Postgres cluster %s is ready and attached to %s\n", response.Data.Id, state.Plan.AppName) + fmt.Fprintf(io.Out, "The following secret was added to %s:\n DATABASE_URL=%s\n", state.Plan.AppName, cluster.Credentials.ConnectionUri) + + return nil +} + +// containsNetworkError checks if an error message contains network-related error indicators +func containsNetworkError(errMsg string) bool { + networkErrors := []string{ + "connection reset by peer", + "connection refused", + "timeout", + "network is unreachable", + "temporary failure in name resolution", + "i/o timeout", + } + + for _, netErr := range networkErrors { + if contains(errMsg, netErr) { + return true + } + } + return false +} + +// contains checks if a string contains a substring (case-insensitive) +func contains(s, substr string) bool { + return len(s) >= len(substr) && + (s == substr || + len(s) > len(substr) && + (stringContains(s, substr))) +} + +func stringContains(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false } func 
(state *launchState) createUpstashRedis(ctx context.Context) error { diff --git a/internal/command/launch/launch_extensions.go b/internal/command/launch/launch_extensions.go index 1663778d1d..a306651f21 100644 --- a/internal/command/launch/launch_extensions.go +++ b/internal/command/launch/launch_extensions.go @@ -19,7 +19,11 @@ func (state *launchState) launchSentry(ctx context.Context, app_name string) err } if extension.SetsSecrets { - if err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), false, false); err != nil { + if err = secrets.DeploySecrets(ctx, gql.ToAppCompact(*extension.App), secrets.DeploymentArgs{ + Stage: false, + Detach: false, + CheckDNS: true, + }); err != nil { return err } } diff --git a/internal/command/launch/launch_frameworks.go b/internal/command/launch/launch_frameworks.go index dc90e8f4f3..c2bc82f84f 100644 --- a/internal/command/launch/launch_frameworks.go +++ b/internal/command/launch/launch_frameworks.go @@ -15,8 +15,10 @@ import ( "github.com/superfly/flyctl/gql" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/iostreams" @@ -24,6 +26,10 @@ import ( ) func (state *launchState) setupGitHubActions(ctx context.Context, appName string) error { + if flag.GetBool(ctx, "no-github-workflow") || flag.GetString(ctx, "from") != "" { + return nil + } + state.sourceInfo.Files = append(state.sourceInfo.Files, state.sourceInfo.GitHubActions.Files...) if state.sourceInfo.GitHubActions.Secrets { @@ -59,7 +65,6 @@ func (state *launchState) setupGitHubActions(ctx context.Context, appName string return fmt.Errorf("failed creating token: %w", err) } else { token := resp.CreateLimitedAccessToken.LimitedAccessToken.TokenHeader - fmt.Println(token) fmt.Println("Setting FLY_API_TOKEN secret in GitHub repository settings") cmd := exec.Command(gh, "secret", "set", "FLY_API_TOKEN") @@ -115,9 +120,13 @@ func (state *launchState) scannerCreateFiles(ctx context.Context) error { fmt.Fprintf(io.Out, "You specified --now, so not overwriting %s\n", path) continue } - confirm, err := prompt.ConfirmOverwrite(ctx, path) - if !confirm || err != nil { - continue + if !flag.GetBool(ctx, "yes") { + confirm, err := prompt.ConfirmOverwrite(ctx, path) + if !confirm || err != nil { + continue + } + } else { + fmt.Fprintf(io.Out, "You specified --yes, overwriting %s\n", path) } } @@ -169,8 +178,8 @@ func (state *launchState) scannerCreateSecrets(ctx context.Context) error { } if len(secrets) > 0 { - apiClient := flyutil.ClientFromContext(ctx) - _, err := apiClient.SetSecrets(ctx, state.Plan.AppName, secrets) + flapsClient := flapsutil.ClientFromContext(ctx) + err := appsecrets.Update(ctx, flapsClient, state.Plan.AppName, secrets, nil) if err != nil { return err } @@ -200,6 +209,10 @@ func (state *launchState) scannerRunCallback(ctx context.Context) error { state.sourceInfo.ReleaseCmd = cfg.Deploy.ReleaseCommand } + if state.sourceInfo.SeedCmd == "" && cfg.Deploy != nil { + state.sourceInfo.SeedCmd = cfg.Deploy.SeedCommand + } + if len(cfg.Env) > 0 { if len(state.sourceInfo.Env) == 0 { state.sourceInfo.Env = cfg.Env @@ -236,6 +249,13 @@ func (state *launchState) scannerRunInitCommands(ctx context.Context) error { } } } + + if 
state.sourceInfo != nil && state.sourceInfo.PostInitCallback != nil { + if err := state.sourceInfo.PostInitCallback(); err != nil { + return err + } + } + return nil } @@ -302,8 +322,11 @@ func (state *launchState) scannerSetAppconfig(ctx context.Context) error { var appVolumes []appconfig.Mount for _, v := range srcInfo.Volumes { appVolumes = append(appVolumes, appconfig.Mount{ - Source: v.Source, - Destination: v.Destination, + Source: v.Source, + Destination: v.Destination, + AutoExtendSizeThreshold: v.AutoExtendSizeThreshold, + AutoExtendSizeIncrement: v.AutoExtendSizeIncrement, + AutoExtendSizeLimit: v.AutoExtendSizeLimit, }) } appConfig.SetMounts(appVolumes) @@ -326,6 +349,11 @@ func (state *launchState) scannerSetAppconfig(ctx context.Context) error { appConfig.SetReleaseCommand(srcInfo.ReleaseCmd) } + if srcInfo.SeedCmd != "" { + // no V1 compatibility for this feature so bypass setters + appConfig.Deploy.SeedCommand = srcInfo.SeedCmd + } + if srcInfo.DockerCommand != "" { appConfig.SetDockerCommand(srcInfo.DockerCommand) } diff --git a/internal/command/launch/plan/postgres.go b/internal/command/launch/plan/postgres.go index d9a83f3ff7..3e19f3443d 100644 --- a/internal/command/launch/plan/postgres.go +++ b/internal/command/launch/plan/postgres.go @@ -1,12 +1,20 @@ package plan import ( + "context" + "fmt" + fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/command/mpg" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/iostreams" ) type PostgresPlan struct { FlyPostgres *FlyPostgresPlan `json:"fly_postgres"` SupabasePostgres *SupabasePostgresPlan `json:"supabase_postgres"` + ManagedPostgres *ManagedPostgresPlan `json:"managed_postgres"` } func (p *PostgresPlan) Provider() any { @@ -19,12 +27,76 @@ func (p *PostgresPlan) Provider() any { if p.SupabasePostgres != nil { return p.SupabasePostgres } + if p.ManagedPostgres != nil { + return p.ManagedPostgres + } return nil } -func DefaultPostgres(plan *LaunchPlan) PostgresPlan { +// DefaultPostgres returns the default postgres configuration, with support for forcing postgres type and interactive region selection +func DefaultPostgres(ctx context.Context, plan *LaunchPlan, mpgEnabled bool) (PostgresPlan, error) { + io := iostreams.FromContext(ctx) + isInteractive := io != nil && io.IsInteractive() + + // Check the --db flag value to determine postgres type preference + dbFlag := flag.GetString(ctx, "db") + + // Normalize db flag values + var forceType string + switch dbFlag { + case "true", "1", "yes": + forceType = "default" // Use default behavior + case "mpg", "managed": + forceType = "managed" // Force managed postgres + case "upg", "unmanaged", "legacy": + forceType = "unmanaged" // Force unmanaged postgres + default: + forceType = "default" // Default behavior for empty or unrecognized values + } + + // If forced to use unmanaged postgres, use it regardless of mpgEnabled + if forceType == "unmanaged" { + return createFlyPostgresPlan(plan), nil + } + + // If forced to use managed postgres, handle region availability + if forceType == "managed" { + return handleForcedManagedPostgres(ctx, plan) + } + + // Normal flow: prefer managed if enabled and available + orgSlug, err := mpg.ResolveOrganizationSlug(ctx, plan.OrgSlug) + if err == nil && mpgEnabled { + // 2025-08-06: only default to MPG in interactive for now, we should update this down the road + validRegion, err := mpg.IsValidMPGRegion(ctx, orgSlug, plan.RegionCode) + if isInteractive { + 
if err == nil && validRegion { + // Managed postgres is available in this region, use it + return createManagedPostgresPlan(ctx, plan, "basic"), nil + } + + // Offer to switch to a nearby region that supports managed postgres + return handleInteractiveRegionSwitch(ctx, plan, orgSlug) + } else { + // Non-interactive: log warning and fall back to FlyPostgres + if io != nil && err == nil { + if validRegion { + fmt.Fprintf(io.ErrOut, "Warning: Using Unmanaged Postgres because non-interactive launch defaults to Unmanaged Postgres to maintain backwards compatibility for now\n") + } else { + fmt.Fprintf(io.ErrOut, "Warning: Using Unmanaged Postgres because Managed Postgres isn't yet available in region %s\n", plan.RegionCode) + } + } + } + } + + // Default to FlyPostgres + fmt.Fprintf(io.ErrOut, "Deprecation Warning: We will soon default to Managed Postgres when launching new apps in compatible regions. Pass --db=mpg to use Managed Postgres now and --db=upg to use Unmanaged Postgres.\n") + return createFlyPostgresPlan(plan), nil +} + +// createFlyPostgresPlan creates a FlyPostgres plan with default values +func createFlyPostgresPlan(plan *LaunchPlan) PostgresPlan { return PostgresPlan{ - // TODO: Once supabase is GA, we want to default to Supabase FlyPostgres: &FlyPostgresPlan{ // NOTE: Until Legacy Launch is removed, we have to maintain // "%app_name%-db" as the app name for the database. @@ -35,10 +107,115 @@ func DefaultPostgres(plan *LaunchPlan) PostgresPlan { VmRam: 256, Nodes: 1, DiskSizeGB: 1, + Price: -1, }, } } +// createManagedPostgresPlan creates a managed postgres plan and displays cost information +func createManagedPostgresPlan(ctx context.Context, plan *LaunchPlan, planType string) PostgresPlan { + io := iostreams.FromContext(ctx) + + // Display plan details if we have an IO context + if io != nil && planType != "" { + if planDetails, exists := mpg.MPGPlans[planType]; exists { + fmt.Fprintf(io.Out, "\nSelected Managed Postgres Plan: %s\n", planDetails.Name) + fmt.Fprintf(io.Out, " CPU: %s\n", planDetails.CPU) + fmt.Fprintf(io.Out, " Memory: %s\n", planDetails.Memory) + fmt.Fprintf(io.Out, " Price: $%d per month\n\n", planDetails.PricePerMo) + } + } + + return PostgresPlan{ + ManagedPostgres: &ManagedPostgresPlan{ + DbName: plan.AppName + "-db", + Region: plan.RegionCode, + Plan: planType, + DiskSize: 10, // Default managed postgres disk size + }, + } +} + +// handleForcedManagedPostgres handles the case where managed postgres is forced but may not be available +func handleForcedManagedPostgres(ctx context.Context, plan *LaunchPlan) (PostgresPlan, error) { + io := iostreams.FromContext(ctx) + + orgSlug, err := mpg.ResolveOrganizationSlug(ctx, plan.OrgSlug) + if err != nil { + return createFlyPostgresPlan(plan), nil + } + + validRegion, err := mpg.IsValidMPGRegion(ctx, orgSlug, plan.RegionCode) + + if err == nil && validRegion { + // Region supports managed postgres + return createManagedPostgresPlan(ctx, plan, "basic"), nil + } + + // Region doesn't support managed postgres + isInteractive := io != nil && io.IsInteractive() + if isInteractive { + // Interactive: suggest switching to a supported region + return handleInteractiveRegionSwitch(ctx, plan, orgSlug) + } else { + // Non-interactive: fail with error + availableCodes, _ := mpg.GetAvailableMPGRegionCodes(ctx, orgSlug) + return PostgresPlan{}, fmt.Errorf("managed postgres is not available in region %s. 
Available regions: %v", plan.RegionCode, availableCodes) + } +} + +// handleInteractiveRegionSwitch prompts user to switch to a region that supports managed postgres +func handleInteractiveRegionSwitch(ctx context.Context, plan *LaunchPlan, orgSlug string) (PostgresPlan, error) { + io := iostreams.FromContext(ctx) + + // Get available MPG regions + availableRegions, err := mpg.GetAvailableMPGRegions(ctx, orgSlug) + if err != nil || len(availableRegions) == 0 { + if io != nil { + fmt.Fprintf(io.ErrOut, "Warning: Unable to find regions that support Managed Postgres. Using Unmanaged Postgres in region %s\n", plan.RegionCode) + } + return createFlyPostgresPlan(plan), nil + } + + // Ask user if they want to switch regions + if io != nil { + fmt.Fprintf(io.Out, "Managed Postgres is not available in region %s.\n", plan.RegionCode) + } + + confirmed, err := prompt.Confirm(ctx, "Would you like to switch to a region that supports Managed Postgres?") + if err != nil || !confirmed { + if io != nil { + fmt.Fprintf(io.ErrOut, "Using Unmanaged Postgres in region %s\n", plan.RegionCode) + } + return createFlyPostgresPlan(plan), nil + } + + // Present region options + var regionOptions []string + for _, region := range availableRegions { + regionOptions = append(regionOptions, fmt.Sprintf("%s (%s)", region.Name, region.Code)) + } + + var selectedIndex int + if err := prompt.Select(ctx, &selectedIndex, "Select a region for Managed Postgres", "", regionOptions...); err != nil { + if io != nil { + fmt.Fprintf(io.ErrOut, "Failed to select region. Using Unmanaged Postgres in region %s\n", plan.RegionCode) + } + return createFlyPostgresPlan(plan), nil + } + + // Update the plan with the new region - this changes the overall app region, + // not just the postgres region, so the entire app launches in the MPG-supported region + selectedRegion := availableRegions[selectedIndex] + plan.RegionCode = selectedRegion.Code + + if io != nil { + fmt.Fprintf(io.Out, "Switched to region %s (%s) for Managed Postgres support.\nYour app will now launch in this region.\n", selectedRegion.Name, selectedRegion.Code) + } + + return createManagedPostgresPlan(ctx, plan, "basic"), nil +} + type FlyPostgresPlan struct { AppName string `json:"app_name"` VmSize string `json:"vm_size"` @@ -46,6 +223,7 @@ type FlyPostgresPlan struct { Nodes int `json:"nodes"` DiskSizeGB int `json:"disk_size_gb"` AutoStop bool `json:"auto_stop"` + Price int `json:"price"` } func (p *FlyPostgresPlan) Guest() *fly.MachineGuest { @@ -75,3 +253,25 @@ func (p *SupabasePostgresPlan) GetRegion(plan *LaunchPlan) string { } return p.Region } + +type ManagedPostgresPlan struct { + DbName string `json:"db_name"` + Region string `json:"region"` + Plan string `json:"plan"` + DiskSize int `json:"disk_size"` + ClusterID string `json:"cluster_id,omitempty"` +} + +func (p *ManagedPostgresPlan) GetDbName(plan *LaunchPlan) string { + if p.DbName == "" { + return plan.AppName + "-db" + } + return p.DbName +} + +func (p *ManagedPostgresPlan) GetRegion(plan *LaunchPlan) string { + if p.Region == "" { + return plan.RegionCode + } + return p.Region +} diff --git a/internal/command/launch/plan/postgres_test.go b/internal/command/launch/plan/postgres_test.go new file mode 100644 index 0000000000..80cc9c63b3 --- /dev/null +++ b/internal/command/launch/plan/postgres_test.go @@ -0,0 +1,362 @@ +package plan + +import ( + "context" + "testing" + + genq "github.com/Khan/genqlient/graphql" + "github.com/spf13/pflag" + fly "github.com/superfly/fly-go" + 
"github.com/superfly/flyctl/internal/flag/flagctx" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/mock" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" + "github.com/superfly/flyctl/iostreams" +) + +// mockUIEXClient implements uiexutil.Client for testing +type mockUIEXClient struct { + mpgRegions []uiex.MPGRegion +} + +func (m *mockUIEXClient) ListMPGRegions(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + return uiex.ListMPGRegionsResponse{Data: m.mpgRegions}, nil +} + +// mockGenqClient implements the genq.Client interface for testing +type mockGenqClient struct{} + +func (m *mockGenqClient) MakeRequest(ctx context.Context, req *genq.Request, resp *genq.Response) error { + // Mock the GetOrganization response - just return the same slug + // This simulates the ResolveOrganizationSlug behavior + resp.Data = map[string]interface{}{ + "organization": map[string]interface{}{ + "rawSlug": "test-org", // Return a fixed value for testing + }, + } + return nil +} + +func (m *mockUIEXClient) ListManagedClusters(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) { + return uiex.ListManagedClustersResponse{}, nil +} + +func (m *mockUIEXClient) GetManagedCluster(ctx context.Context, orgSlug string, id string) (uiex.GetManagedClusterResponse, error) { + return uiex.GetManagedClusterResponse{}, nil +} + +func (m *mockUIEXClient) GetManagedClusterById(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + return uiex.GetManagedClusterResponse{}, nil +} + +func (m *mockUIEXClient) CreateUser(ctx context.Context, id string, input uiex.CreateUserInput) (uiex.CreateUserResponse, error) { + return uiex.CreateUserResponse{}, nil +} + +func (m *mockUIEXClient) CreateCluster(ctx context.Context, input uiex.CreateClusterInput) (uiex.CreateClusterResponse, error) { + return uiex.CreateClusterResponse{}, nil +} + +func (m *mockUIEXClient) DestroyCluster(ctx context.Context, orgSlug string, id string) error { + return nil +} + +func (m *mockUIEXClient) CreateFlyManagedBuilder(ctx context.Context, orgSlug string, region string) (uiex.CreateFlyManagedBuilderResponse, error) { + return uiex.CreateFlyManagedBuilderResponse{}, nil +} + +func TestDefaultPostgres_ForceTypes(t *testing.T) { + tests := []struct { + name string + dbFlag string + mpgEnabled bool + mpgRegionsWithIAD bool // whether iad region supports MPG + expectedType string // "managed", "unmanaged", or "default" + expectError bool + }{ + { + name: "force managed postgres with region support", + dbFlag: "mpg", + mpgEnabled: true, + mpgRegionsWithIAD: true, + expectedType: "managed", + expectError: false, + }, + { + name: "force unmanaged postgres", + dbFlag: "upg", + mpgEnabled: true, + mpgRegionsWithIAD: true, + expectedType: "unmanaged", + expectError: false, + }, + { + name: "force legacy postgres", + dbFlag: "legacy", + mpgEnabled: true, + mpgRegionsWithIAD: true, + expectedType: "unmanaged", + expectError: false, + }, + { + name: "default non-interactive behavior with mpg enabled and region support", + dbFlag: "true", + mpgEnabled: true, + mpgRegionsWithIAD: true, + expectedType: "unmanaged", + expectError: false, + }, + { + name: "default non-interactive behavior with mpg enabled but no region support", + dbFlag: "true", + mpgEnabled: true, + mpgRegionsWithIAD: false, + expectedType: "unmanaged", + expectError: false, + }, + { + name: "default behavior with mpg disabled", + dbFlag: 
"true", + mpgEnabled: false, + mpgRegionsWithIAD: false, + expectedType: "unmanaged", + expectError: false, + }, + { + name: "force unmanaged overrides mpg enabled", + dbFlag: "upg", + mpgEnabled: true, + mpgRegionsWithIAD: true, + expectedType: "unmanaged", + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a context with iostreams + ctx := context.Background() + ctx = iostreams.NewContext(ctx, iostreams.System()) + + // Create a test context with flags + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.String("db", tt.dbFlag, "") + ctx = flagctx.NewContext(ctx, flagSet) + + // Set up mock UIEX client for MPG regions + var mpgRegions []uiex.MPGRegion + if tt.mpgRegionsWithIAD { + mpgRegions = []uiex.MPGRegion{ + {Code: "iad", Available: true}, + {Code: "lax", Available: true}, + } + } else { + mpgRegions = []uiex.MPGRegion{ + {Code: "lax", Available: true}, + {Code: "fra", Available: true}, + // iad is not in the list, so it's not available + } + } + mockUIEX := &mockUIEXClient{mpgRegions: mpgRegions} + ctx = uiexutil.NewContextWithClient(ctx, mockUIEX) + + // Set up mock API client for platform regions + mockClient := &mock.Client{ + PlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, *fly.Region, error) { + // Return some mock regions for testing + return []fly.Region{ + {Code: "iad", Name: "Ashburn, Virginia (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + {Code: "fra", Name: "Frankfurt, Germany"}, + }, &fly.Region{Code: "iad", Name: "Ashburn, Virginia (US)"}, nil + }, + GenqClientFunc: func() genq.Client { + return &mockGenqClient{} + }, + } + ctx = flyutil.NewContextWithClient(ctx, mockClient) + + // Create a mock launch plan + plan := &LaunchPlan{ + AppName: "test-app", + OrgSlug: "test-org", + RegionCode: "iad", // Use iad region for testing + } + + result, err := DefaultPostgres(ctx, plan, tt.mpgEnabled) + + if tt.expectError { + if err == nil { + t.Errorf("expected error but got none") + return + } + } else { + if err != nil { + t.Errorf("expected no error but got: %v", err) + return + } + } + + // Check the type of postgres plan returned + switch tt.expectedType { + case "managed": + if result.ManagedPostgres == nil { + t.Errorf("expected managed postgres plan but got nil") + } + if result.FlyPostgres != nil { + t.Errorf("expected no fly postgres plan but got one") + } + case "unmanaged": + if result.FlyPostgres == nil { + t.Errorf("expected fly postgres plan but got nil") + } + if result.ManagedPostgres != nil { + t.Errorf("expected no managed postgres plan but got one") + } + } + }) + } +} + +// TestDefaultPostgres_RegionSwitching tests that when MPG region switching occurs, +// the overall LaunchPlan.RegionCode is updated, not just the postgres plan +func TestDefaultPostgres_RegionSwitching(t *testing.T) { + t.Run("region switching updates overall app region", func(t *testing.T) { + // Create a context with iostreams (non-interactive to avoid prompts) + ctx := context.Background() + ctx = iostreams.NewContext(ctx, iostreams.System()) + + // Create a test context with default db flag + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.String("db", "true", "") + ctx = flagctx.NewContext(ctx, flagSet) + + // Set up mock UIEX client where iad doesn't support MPG but lax does + mpgRegions := []uiex.MPGRegion{ + {Code: "lax", Available: true}, + {Code: "fra", Available: true}, + // iad is not in the list, so it's not available + } + mockUIEX := 
&mockUIEXClient{mpgRegions: mpgRegions} + ctx = uiexutil.NewContextWithClient(ctx, mockUIEX) + + // Set up mock API client for platform regions + mockClient := &mock.Client{ + PlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, *fly.Region, error) { + return []fly.Region{ + {Code: "iad", Name: "Ashburn, Virginia (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + {Code: "fra", Name: "Frankfurt, Germany"}, + }, &fly.Region{Code: "iad", Name: "Ashburn, Virginia (US)"}, nil + }, + GenqClientFunc: func() genq.Client { + return &mockGenqClient{} + }, + } + ctx = flyutil.NewContextWithClient(ctx, mockClient) + + // Create a launch plan starting with iad region + plan := &LaunchPlan{ + AppName: "test-app", + OrgSlug: "test-org", + RegionCode: "iad", // Start with iad + } + + originalRegion := plan.RegionCode + + result, err := DefaultPostgres(ctx, plan, true) // mpgEnabled = true + + if err != nil { + t.Errorf("expected no error but got: %v", err) + return + } + + // In non-interactive mode, it should fall back to unmanaged postgres + // and NOT change the region (since user can't be prompted) + if result.FlyPostgres == nil { + t.Errorf("expected fly postgres plan but got nil") + } + if result.ManagedPostgres != nil { + t.Errorf("expected no managed postgres plan but got one") + } + + // Region should remain unchanged in non-interactive mode + if plan.RegionCode != originalRegion { + t.Errorf("expected region to remain %s but it changed to %s", originalRegion, plan.RegionCode) + } + }) +} + +func TestCreateFlyPostgresPlan(t *testing.T) { + plan := &LaunchPlan{ + AppName: "test-app", + OrgSlug: "test-org", + RegionCode: "iad", + } + + result := createFlyPostgresPlan(plan) + + if result.FlyPostgres == nil { + t.Errorf("expected FlyPostgres plan but got nil") + return + } + + if result.FlyPostgres.AppName != "test-app-db" { + t.Errorf("expected app name 'test-app-db' but got '%s'", result.FlyPostgres.AppName) + } + + if result.FlyPostgres.VmSize != "shared-cpu-1x" { + t.Errorf("expected vm size 'shared-cpu-1x' but got '%s'", result.FlyPostgres.VmSize) + } + + if result.FlyPostgres.VmRam != 256 { + t.Errorf("expected vm ram 256 but got %d", result.FlyPostgres.VmRam) + } + + if result.FlyPostgres.DiskSizeGB != 1 { + t.Errorf("expected disk size 1 but got %d", result.FlyPostgres.DiskSizeGB) + } + + if result.ManagedPostgres != nil { + t.Errorf("expected no managed postgres plan but got one") + } +} + +func TestCreateManagedPostgresPlan(t *testing.T) { + ctx := context.Background() + ctx = iostreams.NewContext(ctx, iostreams.System()) + + plan := &LaunchPlan{ + AppName: "test-app", + OrgSlug: "test-org", + RegionCode: "iad", + } + + result := createManagedPostgresPlan(ctx, plan, "basic") + + if result.ManagedPostgres == nil { + t.Errorf("expected ManagedPostgres plan but got nil") + return + } + + if result.ManagedPostgres.DbName != "test-app-db" { + t.Errorf("expected db name 'test-app-db' but got '%s'", result.ManagedPostgres.DbName) + } + + if result.ManagedPostgres.Region != "iad" { + t.Errorf("expected region 'iad' but got '%s'", result.ManagedPostgres.Region) + } + + if result.ManagedPostgres.Plan != "basic" { + t.Errorf("expected plan 'basic' but got '%s'", result.ManagedPostgres.Plan) + } + + if result.ManagedPostgres.DiskSize != 10 { + t.Errorf("expected disk size 10 but got %d", result.ManagedPostgres.DiskSize) + } + + if result.FlyPostgres != nil { + t.Errorf("expected no fly postgres plan but got one") + } +} diff --git a/internal/command/launch/plan_builder.go 
b/internal/command/launch/plan_builder.go index 5a27e8e04f..349d92d302 100644 --- a/internal/command/launch/plan_builder.go +++ b/internal/command/launch/plan_builder.go @@ -22,6 +22,7 @@ import ( "github.com/superfly/flyctl/internal/flyerr" "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/haikunator" + "github.com/superfly/flyctl/internal/launchdarkly" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/iostreams" "github.com/superfly/flyctl/scanner" @@ -211,24 +212,54 @@ func buildManifest(ctx context.Context, parentConfig *appconfig.Config, recovera warnedNoCcHa: false, } - if planValidateHighAvailability(ctx, lp, org, true) { + if planValidateHighAvailability(ctx, lp, org.Billable, true) { buildCache.warnedNoCcHa = true } if srcInfo != nil { + ldClient, err := launchdarkly.NewServiceClient() + if err != nil { + return nil, nil, err + } + mpgEnabled := ldClient.ManagedPostgresEnabled() + lp.ScannerFamily = srcInfo.Family const scannerSource = "determined from app source" if !flag.GetBool(ctx, "no-db") { switch srcInfo.DatabaseDesired { case scanner.DatabaseKindPostgres: - lp.Postgres = plan.DefaultPostgres(lp) + lp.Postgres, err = plan.DefaultPostgres(ctx, lp, mpgEnabled) + if err != nil { + return nil, nil, err + } planSource.postgresSource = scannerSource + + // We offer switching to MPG if interactive session and the region is not the same as the MPG region + // App should launch in the MPG region + if lp.Postgres.ManagedPostgres != nil && lp.Postgres.ManagedPostgres.Region != region.Code { + lp.RegionCode = lp.Postgres.ManagedPostgres.Region + } case scanner.DatabaseKindMySQL: // TODO case scanner.DatabaseKindSqlite: // TODO } } + // Force Postgres provisioning if --db flag is set + dbFlag := flag.GetString(ctx, "db") + if dbFlag != "" { + lp.Postgres, err = plan.DefaultPostgres(ctx, lp, mpgEnabled) + if err != nil { + return nil, nil, err + } + planSource.postgresSource = "forced by --db flag" + + // We offer switching to MPG if interactive session and the region is not the same as the MPG region + // App should launch in the MPG region + if lp.Postgres.ManagedPostgres != nil && lp.Postgres.ManagedPostgres.Region != region.Code { + lp.RegionCode = lp.Postgres.ManagedPostgres.Region + } + } if !flag.GetBool(ctx, "no-redis") && srcInfo.RedisDesired { lp.Redis = plan.DefaultRedis(lp) planSource.redisSource = scannerSource @@ -774,8 +805,8 @@ func determineCompute(ctx context.Context, config *appconfig.Config, srcInfo *sc return []*appconfig.Compute{guestToCompute(guest)}, reason, nil } -func planValidateHighAvailability(ctx context.Context, p *plan.LaunchPlan, org *fly.Organization, print bool) bool { - if !org.Billable && p.HighAvailability { +func planValidateHighAvailability(ctx context.Context, p *plan.LaunchPlan, billable, print bool) bool { + if !billable && p.HighAvailability { if print { fmt.Fprintln(iostreams.FromContext(ctx).ErrOut, "Warning: This organization has no payment method, turning off high availability") } diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index da97e5116e..afae7b28a0 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -2,11 +2,14 @@ package launch import ( "context" + "os" "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/logger" + 
"github.com/superfly/flyctl/iostreams" ) func NewPlan() *cobra.Command { @@ -75,6 +78,13 @@ func newPropose() *cobra.Command { Description: "Don't allow a \"blank\" app (nothing could be detected)", Default: true, }, + flag.Compression(), + flag.CompressionLevel(), + flag.Int{ + Name: "internal-port", + Description: "Set internal_port for all services in the generated fly.toml", + Default: -1, + }, ) return cmd @@ -92,6 +102,13 @@ func newCreate() *cobra.Command { Default: "", Hidden: true, }, + flag.Int{ + Name: "internal-port", + Description: "Set internal_port for all services in the generated fly.toml", + Default: -1, + }, + flag.Compression(), + flag.CompressionLevel(), ) return cmd @@ -170,6 +187,13 @@ func newGenerate() *cobra.Command { Default: "", Hidden: true, }, + flag.Compression(), + flag.CompressionLevel(), + flag.Int{ + Name: "internal-port", + Description: "Set internal_port for all services in the generated fly.toml", + Default: -1, + }, ) return cmd @@ -181,7 +205,12 @@ func RunPlan(ctx context.Context, step string) error { } func runPropose(ctx context.Context) error { - return RunPlan(ctx, "propose") + if flag.GetString(ctx, "manifest-path") == "" { + ctx = logger.NewContext(context.Background(), logger.New(os.Stderr, logger.FromContext(ctx).Level(), iostreams.IsTerminalWriter(os.Stdout))) + } + + RunPlan(ctx, "propose") + return nil } func runCreate(ctx context.Context) error { diff --git a/internal/command/launch/sessions.go b/internal/command/launch/sessions.go index a6c9ede18e..f7bcc62ff3 100644 --- a/internal/command/launch/sessions.go +++ b/internal/command/launch/sessions.go @@ -288,7 +288,7 @@ func runSessionFinalize(ctx context.Context) (err error) { // This should never be changed by the UI!! state.Plan.ScannerFamily = oldPlan.ScannerFamily - updateConfig(state.Plan, nil, state.Config) + state.updateConfig(ctx, state.Plan, nil, state.Config) manifestPath := flag.GetString(ctx, "manifest-path") diff --git a/internal/command/launch/sourceinfo.go b/internal/command/launch/sourceinfo.go index 3843df2da0..66541f5525 100644 --- a/internal/command/launch/sourceinfo.go +++ b/internal/command/launch/sourceinfo.go @@ -78,7 +78,7 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC if srcInfo == nil { var colorFn func(arg interface{}) aurora.Value - noBlank := flag.GetBool(ctx, "no-blank") + noBlank := planStep == "propose" || flag.GetBool(ctx, "no-blank") if noBlank { colorFn = aurora.Red } else { @@ -98,6 +98,7 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC fmt.Fprintln(io.Out, e.Name()) } } + return nil, nil, errors.New("Could not detect runtime or Dockerfile") } return srcInfo, nil, err @@ -128,7 +129,7 @@ func determineSourceInfo(ctx context.Context, appConfig *appconfig.Config, copyC } func articleFor(w string) string { - var article string = "a" + var article = "a" if matched, _ := regexp.MatchString(`^[aeiou]`, strings.ToLower(w)); matched { article += "n" } diff --git a/internal/command/launch/state.go b/internal/command/launch/state.go index 1d01e6b0ee..3680842af1 100644 --- a/internal/command/launch/state.go +++ b/internal/command/launch/state.go @@ -58,6 +58,15 @@ func cacheGrab[T any](cache map[string]interface{}, key string, cb func() (T, er return val, nil } +func (state *launchState) orgCompact(ctx context.Context) (*gql.GetOrganizationOrganization, error) { + client := flyutil.ClientFromContext(ctx).GenqClient() + res, err := gql.GetOrganization(ctx, client, state.Plan.OrgSlug) + if err 
!= nil { + return nil, fmt.Errorf("failed to get org %q for state: %w", state.Plan.OrgSlug, err) + } + return &res.Organization, nil +} + func (state *launchState) Org(ctx context.Context) (*fly.Organization, error) { apiClient := flyutil.ClientFromContext(ctx) return cacheGrab(state.cache, "org,"+state.Plan.OrgSlug, func() (*fly.Organization, error) { @@ -105,7 +114,7 @@ func (state *launchState) PlanSummary(ctx context.Context) (string, error) { guestStr += fmt.Sprintf(", %d more", len(state.appConfig.Compute)-1) } - org, err := state.Org(ctx) + org, err := state.orgCompact(ctx) if err != nil { return "", err } @@ -120,7 +129,7 @@ func (state *launchState) PlanSummary(ctx context.Context) (string, error) { return "", err } - redisStr, err := describeRedisPlan(ctx, state.Plan.Redis, org) + redisStr, err := describeRedisPlan(ctx, state.Plan.Redis) if err != nil { return "", err } @@ -185,7 +194,7 @@ func (state *launchState) validateExtensions(ctx context.Context) error { io := iostreams.FromContext(ctx) noConfirm := !io.IsInteractive() || flag.GetBool(ctx, "now") - org, err := state.Org(ctx) + org, err := state.orgCompact(ctx) if err != nil { return err } diff --git a/internal/command/launch/webui.go b/internal/command/launch/webui.go index 81fd1ccc7c..ffdeafe379 100644 --- a/internal/command/launch/webui.go +++ b/internal/command/launch/webui.go @@ -16,6 +16,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/command/launch/plan" + "github.com/superfly/flyctl/internal/command/mpg" "github.com/superfly/flyctl/internal/logger" state2 "github.com/superfly/flyctl/internal/state" "github.com/superfly/flyctl/internal/tracing" @@ -42,7 +43,6 @@ func (state *launchState) EditInWebUi(ctx context.Context) error { session.URL, ) } else { - colorize := io.ColorScheme() fmt.Fprintf(io.Out, "Opening %s ...\n\n", colorize.Bold(session.URL)) } @@ -86,6 +86,58 @@ func (state *launchState) EditInWebUi(ctx context.Context) error { // This should never be changed by the UI!! state.Plan.ScannerFamily = oldPlan.ScannerFamily + // Handle database plan from form data + if pgData, ok := finalSession.Metadata["postgres"].(map[string]interface{}); ok { + logger.Debugf("Postgres form data: %+v", pgData) + if mpgData, ok := pgData["managed_postgres"].(map[string]interface{}); ok { + logger.Debugf("Managed Postgres form data: %+v", mpgData) + // Validate region for managed Postgres + region := "iad" // Default region + if r, ok := mpgData["region"].(string); ok && r != "" { + region = r + } + + org, err := state.orgCompact(ctx) + if err != nil { + return fmt.Errorf("failed to get organization: %w", err) + } + + // Check if region is supported for managed Postgres + validRegion, err := mpg.IsValidMPGRegion(ctx, org.RawSlug, region) + if err != nil { + return fmt.Errorf("failed to validate MPG region: %w", err) + } + + if !validRegion { + availableCodes, _ := mpg.GetAvailableMPGRegionCodes(ctx, org.Slug) + return fmt.Errorf("region %s is not available for Managed Postgres. 
Available regions: %v", region, availableCodes) + } + + state.Plan.Postgres = plan.PostgresPlan{ + ManagedPostgres: &plan.ManagedPostgresPlan{ + DbName: state.Plan.AppName + "-db", + Region: region, + Plan: "basic", // Default plan + DiskSize: 10, // Default disk size + }, + } + + // Apply settings from the form + if dbName, ok := mpgData["db_name"].(string); ok && dbName != "" { + state.Plan.Postgres.ManagedPostgres.DbName = dbName + } + if plan, ok := mpgData["plan"].(string); ok && plan != "" { + state.Plan.Postgres.ManagedPostgres.Plan = plan + } + if disk, ok := mpgData["disk"].(float64); ok { + state.Plan.Postgres.ManagedPostgres.DiskSize = int(disk) + } + if clusterID, ok := mpgData["existing_mpg_hashid"].(string); ok && clusterID != "" { + state.Plan.Postgres.ManagedPostgres.ClusterID = clusterID + } + } + } + return nil } diff --git a/internal/command/machine/clone.go b/internal/command/machine/clone.go index b9d1d58482..0bfd5387f7 100644 --- a/internal/command/machine/clone.go +++ b/internal/command/machine/clone.go @@ -12,6 +12,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" @@ -255,11 +256,17 @@ func runMachineClone(ctx context.Context) (err error) { ) } + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return err + } + input := fly.LaunchMachineInput{ - Name: flag.GetString(ctx, "name"), - Region: region, - Config: targetConfig, - SkipLaunch: len(targetConfig.Standbys) > 0, + Name: flag.GetString(ctx, "name"), + Region: region, + Config: targetConfig, + SkipLaunch: len(targetConfig.Standbys) > 0, + MinSecretsVersion: minvers, } fmt.Fprintf(out, "Provisioning a new Machine with image %s...\n", source.Config.Image) diff --git a/internal/command/machine/machine.go b/internal/command/machine/machine.go index 4d96784145..0f62bb7e0c 100644 --- a/internal/command/machine/machine.go +++ b/internal/command/machine/machine.go @@ -39,6 +39,7 @@ Machines REST fly.` newMachineUncordon(), newSuspend(), newEgressIp(), + newPlace(), ) return cmd diff --git a/internal/command/machine/place.go b/internal/command/machine/place.go new file mode 100644 index 0000000000..0221680e8f --- /dev/null +++ b/internal/command/machine/place.go @@ -0,0 +1,148 @@ +package machine + +import ( + "cmp" + "context" + "fmt" + "slices" + "strconv" + "strings" + + "github.com/docker/go-units" + "github.com/spf13/cobra" + "github.com/superfly/fly-go" + "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/command/orgs" + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/iostreams" +) + +func newPlace() (cmd *cobra.Command) { + const ( + long = `Simulate a batch of Machine placements across multiple regions +` + short = "Simulate Machine placements" + ) + + cmd = command.New("place", short, long, runPlace, + command.RequireSession, + command.LoadAppNameIfPresent, + ) + + cmd.Args = cobra.NoArgs + flag.Add(cmd, + flag.AppConfig(), + flag.JSONOutput(), + flag.Org(), + flag.VMSizeFlags, + flag.Int{ + Name: 
"count", + Description: "number of machines to place", + }, + flag.String{ + Name: "region", + Description: "comma-delimited list of regions to place machines", + }, + flag.String{Name: "volume-name", Description: "name of the volume to place machines"}, + flag.Int{Name: "volume-size", Description: "size of the desired volume to place machines"}, + flag.StringSlice{ + Name: "weights", + Description: "comma-delimited list of key=value weights to adjust placement preferences. e.g., 'region=5,spread=10'", + }, + ) + return +} + +func runPlace(ctx context.Context) error { + flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{}) + if err != nil { + return err + } + guest := &fly.MachineGuest{} + err = guest.SetSize("performance-1x") + if err != nil { + return err + } + guest, err = flag.GetMachineGuest(ctx, guest) + if err != nil { + return err + } + + orgSlug := flag.GetOrg(ctx) + if orgSlug == "" { + appName := appconfig.NameFromContext(ctx) + var org *fly.Organization + if appName == "" { + org, err = orgs.OrgFromFlagOrSelect(ctx) + } else { + org, err = flyutil.ClientFromContext(ctx).GetOrganizationByApp(ctx, appName) + } + if err != nil { + return err + } + orgSlug = org.Slug + } + + weights, err := getWeights(ctx) + if err != nil { + return err + } + region := flag.GetString(ctx, "region") + if region == "" { + region = "any" + } + regions, err := flapsClient.GetPlacements(ctx, &flaps.GetPlacementsRequest{ + ComputeRequirements: guest, + Region: region, + Count: uint64(flag.GetInt(ctx, "count")), + VolumeName: flag.GetString(ctx, "volume-name"), + VolumeSizeBytes: uint64(flag.GetInt(ctx, "volume-size") * units.GB), + Weights: weights, + Org: orgSlug, + }) + if err != nil { + return fmt.Errorf("failed getting machine placements: %w", err) + } + slices.SortFunc(regions, func(a, b flaps.RegionPlacement) int { return cmp.Compare(a.Region, b.Region) }) + + io := iostreams.FromContext(ctx) + out := io.Out + if config.FromContext(ctx).JSONOutput { + return render.JSON(out, regions) + } + + var rows [][]string + for _, region := range regions { + count := fmt.Sprint(region.Count) + row := []string{region.Region, count} + rows = append(rows, row) + } + cols := []string{"Region", "Count"} + return render.Table(out, "", rows, cols...) 
+} + +func getWeights(ctx context.Context) (*flaps.Weights, error) { + weightStr := flag.GetStringSlice(ctx, "weights") + if len(weightStr) == 0 { + return nil, nil + } + weights := make(flaps.Weights) + for _, weight := range weightStr { + parts := strings.SplitN(weight, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid weight: %q", weight) + } + w, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid weight: %q", weight) + } + weights[parts[0]] = w + } + return &weights, nil +} diff --git a/internal/command/machine/run.go b/internal/command/machine/run.go index e0a589cc91..a17c6d0aea 100644 --- a/internal/command/machine/run.go +++ b/internal/command/machine/run.go @@ -2,11 +2,8 @@ package machine import ( "context" - "encoding/json" "fmt" - "io" "math/rand" - "os" "strconv" "strings" "time" @@ -18,10 +15,13 @@ import ( "github.com/spf13/cobra" fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/cmdutil" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/ssh" + "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" @@ -256,6 +256,11 @@ func newRun() *cobra.Command { Description: "Open a shell on the Machine once created (implies --it --rm). If no app is specified, a temporary app is created just for this Machine and destroyed when the Machine is destroyed. See also --command and --user.", Hidden: false, }, + flag.String{ + Name: "container", + Description: "Container to update with the new image, files, etc; defaults to \"app\" or the first container in the config.", + Hidden: false, + }, ) cmd.Args = cobra.MinimumNArgs(0) @@ -372,10 +377,16 @@ func runMachineRun(ctx context.Context) error { }, } + minvers, err := appsecrets.GetMinvers(app.Name) + if err != nil { + return err + } + input := fly.LaunchMachineInput{ - Name: flag.GetString(ctx, "name"), - Region: flag.GetString(ctx, "region"), - LSVD: flag.GetBool(ctx, "lsvd"), + Name: flag.GetString(ctx, "name"), + Region: flag.GetString(ctx, "region"), + LSVD: flag.GetBool(ctx, "lsvd"), + MinSecretsVersion: minvers, } flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ @@ -390,8 +401,8 @@ func runMachineRun(ctx context.Context) error { imageOrPath := flag.FirstArg(ctx) if imageOrPath == "" && shell { imageOrPath = "ubuntu" - } else if imageOrPath == "" { - return fmt.Errorf("image argument can't be an empty string") + } else if flag.GetString(ctx, "dockerfile") != "" { + imageOrPath = "." 
} machineID := flag.GetString(ctx, "id") @@ -411,6 +422,10 @@ func runMachineRun(ctx context.Context) error { return err } + if imageOrPath == "" && len(machineConf.Containers) == 0 { + return fmt.Errorf("image argument can't be an empty string") + } + if flag.GetBool(ctx, "build-only") { return nil } @@ -455,7 +470,7 @@ func runMachineRun(ctx context.Context) error { } if interact { - _, dialer, err := ssh.BringUpAgent(ctx, client, app, *network, false) + _, dialer, err := agent.BringUpAgent(ctx, client, app, *network, false) if err != nil { return err } @@ -479,7 +494,7 @@ func runMachineRun(ctx context.Context) error { return err } - err = ssh.Console(ctx, sshClient, flag.GetString(ctx, "command"), true) + err = ssh.Console(ctx, sshClient, flag.GetString(ctx, "command"), true, "") if destroy { err = soManyErrors("console", err, "destroy machine", Destroy(ctx, app, machine, true)) } @@ -530,12 +545,13 @@ func getOrCreateEphemeralShellApp(ctx context.Context, client flyutil.Client) (* } if appc == nil { + shellAppName := fmt.Sprintf("flyctl-interactive-shells-%s-%d", strings.ToLower(org.ID), rand.Intn(1_000_000)) + shellAppName = strings.TrimRight(shellAppName[:min(len(shellAppName), 63)], "-") appc, err = client.CreateApp(ctx, fly.CreateAppInput{ OrganizationID: org.ID, - // i'll never find love again like the kind you give like the kind you send - Name: fmt.Sprintf("flyctl-interactive-shells-%s-%d", strings.ToLower(org.ID), rand.Intn(1_000_000)), + // I'll never find love again like the kind you give like the kind you send + Name: shellAppName, }) - if err != nil { return nil, fmt.Errorf("create interactive shell app: %w", err) } @@ -647,26 +663,34 @@ func determineMachineConfig( ) (*fly.MachineConfig, error) { machineConf := mach.CloneConfig(&input.initialMachineConf) - if emc := flag.GetString(ctx, "machine-config"); emc != "" { - var buf []byte - switch { - case strings.HasPrefix(emc, "{"): - buf = []byte(emc) - case strings.HasSuffix(emc, ".json"): - fo, err := os.Open(emc) - if err != nil { - return nil, err - } - buf, err = io.ReadAll(fo) - if err != nil { - return nil, err + if mc := flag.GetString(ctx, "machine-config"); mc != "" { + if err := config.ParseConfig(machineConf, mc); err != nil { + return nil, err + } + } + + // identify the container to use + // if no container is specified, look for "app" or the first container + var container *fly.ContainerConfig + if len(machineConf.Containers) > 0 { + match := flag.GetString(ctx, "container") + if match == "" { + match = "app" + } + + for _, c := range machineConf.Containers { + if c.Name == match { + container = c + break } - default: - return nil, fmt.Errorf("invalid machine config source: %q", emc) } - if err := json.Unmarshal(buf, machineConf); err != nil { - return nil, fmt.Errorf("invalid machine config %q: %w", emc, err) + if container == nil { + if flag.GetString(ctx, "container") != "" { + return nil, fmt.Errorf("container %q not found", flag.GetString(ctx, "container")) + } else { + container = machineConf.Containers[0] + } } } @@ -699,26 +723,30 @@ func determineMachineConfig( if input.updating { // Called from `update`. Command is specified by flag. 
- if command := flag.GetString(ctx, "command"); command != "" { - split, err := shlex.Split(command) - if err != nil { - return machineConf, errors.Wrap(err, "invalid command") + if flag.IsSpecified(ctx, "command") { + command := strings.TrimSpace(flag.GetString(ctx, "command")) + switch command { + case "": + machineConf.Init.Cmd = nil + default: + split, err := shlex.Split(command) + if err != nil { + return machineConf, errors.Wrap(err, "invalid command") + } + machineConf.Init.Cmd = split } - machineConf.Init.Cmd = split } } else { // Called from `run`. Command is specified by arguments. args := flag.Args(ctx) - if len(args) != 0 { + if len(args) > 1 { machineConf.Init.Cmd = args[1:] + } else if input.interact { + machineConf.Init.Exec = []string{"/bin/sleep", "inf"} } } - if input.interact { - machineConf.Init.Exec = []string{"/bin/sleep", "inf"} - } - if flag.IsSpecified(ctx, "skip-dns-registration") { if machineConf.DNS == nil { machineConf.DNS = &fly.DNSConfig{} @@ -797,7 +825,12 @@ func determineMachineConfig( if err != nil { return machineConf, err } - machineConf.Image = img.Tag + + if container != nil { + container.Image = img.String() + } else { + machineConf.Image = img.String() + } } // Service updates diff --git a/internal/command/machine/select.go b/internal/command/machine/select.go index dbcaf67bae..51bae55398 100644 --- a/internal/command/machine/select.go +++ b/internal/command/machine/select.go @@ -18,7 +18,7 @@ import ( ) // We now prompt for a machine automatically when no machine IDs are -// provided. This flag is retained for backward compatability. +// provided. This flag is retained for backward compatibility. var selectFlag = flag.Bool{ Name: "select", Description: "Select from a list of machines", diff --git a/internal/command/machine/status.go b/internal/command/machine/status.go index 2a27839cf8..30d3dc6568 100644 --- a/internal/command/machine/status.go +++ b/internal/command/machine/status.go @@ -121,7 +121,7 @@ func runMachineStatus(ctx context.Context) (err error) { }, } - var cols []string = []string{"ID", "Instance ID", "State", "Image", "Name", "Private IP", "Region", "Process Group", "CPU Kind", "vCPUs", "Memory", "Created", "Updated", "Entrypoint", "Command"} + var cols = []string{"ID", "Instance ID", "State", "Image", "Name", "Private IP", "Region", "Process Group", "CPU Kind", "vCPUs", "Memory", "Created", "Updated", "Entrypoint", "Command"} if len(mConfig.Mounts) > 0 { cols = append(cols, "Volume") diff --git a/internal/command/machine/update.go b/internal/command/machine/update.go index 7fd522cc0c..20920dd564 100644 --- a/internal/command/machine/update.go +++ b/internal/command/machine/update.go @@ -11,6 +11,7 @@ import ( "github.com/superfly/flyctl/iostreams" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flyerr" @@ -61,6 +62,14 @@ func newUpdate() *cobra.Command { Description: "Seconds to wait for individual machines to transition states and become healthy. 
(default 300)", Default: 300, }, + flag.String{ + Name: "container", + Description: "Container to update with the new image, files, etc; defaults to \"app\" or the first container in the config.", + Hidden: false, + }, + flag.BuildkitAddr(), + flag.BuildkitImage(), + flag.Buildkit(), ) cmd.Args = cobra.RangeArgs(0, 1) @@ -137,14 +146,20 @@ func runUpdate(ctx context.Context) (err error) { } } + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return err + } + // Perform update input := &fly.LaunchMachineInput{ - Name: machine.Name, - Region: machine.Region, - Config: machineConf, - SkipLaunch: len(machineConf.Standbys) > 0 || skipStart, - SkipHealthChecks: skipHealthChecks, - Timeout: flag.GetInt(ctx, "wait-timeout"), + Name: machine.Name, + Region: machine.Region, + Config: machineConf, + SkipLaunch: len(machineConf.Standbys) > 0 || skipStart, + SkipHealthChecks: skipHealthChecks, + Timeout: flag.GetInt(ctx, "wait-timeout"), + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { var timeoutErr mach.WaitTimeoutErr diff --git a/internal/command/mcp/config.go b/internal/command/mcp/config.go new file mode 100644 index 0000000000..4c79159a3e --- /dev/null +++ b/internal/command/mcp/config.go @@ -0,0 +1,736 @@ +package mcp + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/apex/log" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" + "github.com/superfly/flyctl/internal/logger" + "github.com/superfly/flyctl/internal/prompt" + + "github.com/superfly/fly-go/flaps" +) + +var McpClients = map[string]string{ + "claude": "Claude", + "vscode": "VS Code", + "cursor": "Cursor", + "neovim": "Neovim", + "windsurf": "Windsurf", + "zed": "Zed", +} + +// ConfigPath represents a configuration file path +type ConfigPath struct { + ToolName string + Path string + ConfigName string +} + +func NewAdd() *cobra.Command { + const ( + short = "[experimental] Add MCP proxy client to a MCP client configuration" + long = short + "\n" + usage = "add" + ) + + cmd := command.New(usage, short, long, runAdd, command.RequireAppName) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.App(), + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.String{ + Name: "url", + Description: "URL of the MCP wrapper server", + }, + flag.String{ + Name: "name", + Description: "Name to use for the MCP server in the MCP client configuration", + Hidden: true, + }, + flag.String{ + Name: "server", + Description: "Name to use for the MCP server in the MCP client configuration", + }, + flag.String{ + Name: "user", + Description: "User to authenticate with", + }, + flag.String{ + Name: "password", + Description: "Password to authenticate with", + }, + flag.Bool{ + Name: "bearer-token", + Description: "Use bearer token for authentication", + Default: true, + }, + flag.Bool{ + Name: "flycast", + Description: "Use wireguard and flycast for access", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Add MCP server to the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func NewRemove() *cobra.Command { + const ( + short = 
"[experimental] Remove MCP proxy client from a MCP client configuration" + long = short + "\n" + usage = "remove" + ) + cmd := command.New(usage, short, long, runRemove, command.LoadAppNameIfPresent) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.App(), + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.String{ + Name: "name", + Description: "Name to use for the MCP server in the MCP client configuration", + Hidden: true, + }, + flag.String{ + Name: "server", + Description: "Name to use for the MCP server in the MCP client configuration", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Remove MCP server from the " + name + " client configuration", + }, + ) + } + return cmd +} + +func runAdd(ctx context.Context) error { + appConfig := appconfig.ConfigFromContext(ctx) + if appConfig == nil { + appName := appconfig.NameFromContext(ctx) + if appName == "" { + return errors.New("app name is required") + } else { + // Set up flaps client in context before calling FromRemoteApp + if flapsutil.ClientFromContext(ctx) == nil { + flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ + AppName: appName, + }) + if err != nil { + return fmt.Errorf("could not create flaps client: %w", err) + } + ctx = flapsutil.NewContextWithClient(ctx, flapsClient) + } + + var err error + appConfig, err = appconfig.FromRemoteApp(ctx, appName) + if err != nil { + return err + } + } + } + + url := flag.GetString(ctx, "url") + if url == "" { + if flag.GetBool(ctx, "flycast") { + url = "http://" + appConfig.AppName + ".flycast/" + } else { + appUrl := appConfig.URL() + if appUrl == nil { + return errors.New("The app doesn't expose a public http service") + } + url = appUrl.String() + } + } + + args := []string{"mcp", "proxy", "--url", url} + + user := flag.GetString(ctx, "user") + if user != "" { + args = append(args, "--user", user) + + password := flag.GetString(ctx, "password") + if password == "" { + err := prompt.Password(ctx, &password, "Password:", true) + if err != nil && !prompt.IsNonInteractive(err) { + return fmt.Errorf("failed to get password: %w", err) + } + args = append(args, "--password", password) + } + + if err := flyctl("secrets", "set", "FLY_MCP_USER="+user, "FLY_MCP_PASSWORD="+password, "--app", appConfig.AppName); err != nil { + return fmt.Errorf("failed to set user/password secrets': %w", err) + } + + } else if flag.GetBool(ctx, "bearer-token") { + // Generate a secure random 24 character base64 encoded string for bearerToken + b := make([]byte, 18) // 18 bytes = 24 base64 characters + _, err := rand.Read(b) + if err != nil { + return fmt.Errorf("failed to generate bearer token: %w", err) + } + bearerTokenStr := base64.StdEncoding.EncodeToString(b) + args = append(args, "--bearer-token", bearerTokenStr) + + if err := flyctl("secrets", "set", "FLY_MCP_BEARER_TOKEN="+bearerTokenStr, "--app", appConfig.AppName); err != nil { + return fmt.Errorf("failed to set bearer token secret': %w", err) + } + } + + configPaths, err := ListConfigPaths(ctx, true) + if err != nil { + return err + } else if len(configPaths) == 0 { + return errors.New("no configuration paths found") + } + + server := flag.GetString(ctx, "server") + if server == "" { + server = flag.GetString(ctx, "name") + if server == "" { + server = appConfig.AppName + } + } + + flyctlExecutable, err := os.Executable() + if err != nil { + return fmt.Errorf("failed 
to find executable: %w", err) + } + + for _, configPath := range configPaths { + err = UpdateConfig(ctx, configPath.Path, configPath.ConfigName, server, flyctlExecutable, args) + if err != nil { + return fmt.Errorf("failed to update configuration at %s: %w", configPath.Path, err) + } + } + + return nil +} + +// Build a list of configuration paths to update +func ListConfigPaths(ctx context.Context, configIsArray bool) ([]ConfigPath, error) { + log := logger.FromContext(ctx) + + var paths []ConfigPath + + // Get home directory + home, err := os.UserHomeDir() + if err != nil { + return nil, fmt.Errorf("failed to get home directory: %w", err) + } + + // OS-specific paths + var configDir string + if runtime.GOOS == "darwin" { + configDir = filepath.Join(home, "Library", "Application Support") + } else if runtime.GOOS == "windows" { + configDir = filepath.Join(home, "AppData", "Roaming") + } else { + configDir = filepath.Join(home, ".config") + } + + // Claude configuration + if flag.GetBool(ctx, "claude") { + claudePath := filepath.Join(configDir, "Claude", "claude_desktop_config.json") + log.Debugf("Adding Claude configuration path: %s", claudePath) + paths = append(paths, ConfigPath{ToolName: "claude", Path: claudePath}) + } + + // VS Code configuration + if flag.GetBool(ctx, "vscode") { + vscodePath := filepath.Join(configDir, "Code", "User", "settings.json") + log.Debugf("Adding VS Code configuration path: %s", vscodePath) + paths = append(paths, ConfigPath{ToolName: "vscode", Path: vscodePath, ConfigName: "mcp"}) + } + + // Cursor configuration + if flag.GetBool(ctx, "cursor") { + cursorPath := filepath.Join(configDir, "Cursor", "config.json") + log.Debugf("Adding Cursor configuration path: %s", cursorPath) + paths = append(paths, ConfigPath{ToolName: "cursor", Path: cursorPath}) + } + + // Neovim configuration + if flag.GetBool(ctx, "neovim") { + neovimPath := filepath.Join(configDir, "nvim", "init.json") + log.Debugf("Adding Neovim configuration path: %s", neovimPath) + paths = append(paths, ConfigPath{ToolName: "neovim", Path: neovimPath}) + } + + // Windsurf configuration + if flag.GetBool(ctx, "windsurf") { + windsurfPath := filepath.Join(home, ".codeium", "windsurf", "config.json") + log.Debugf("Adding Windsurf configuration path: %s", windsurfPath) + paths = append(paths, ConfigPath{ToolName: "windsurf", Path: windsurfPath}) + } + + // Zed configuration + if flag.GetBool(ctx, "zed") { + zedPath := filepath.Join(home, ".config", "zed", "settings.json") + log.Debugf("Adding Zed configuration path: %s", zedPath) + paths = append(paths, ConfigPath{ToolName: "zed", Path: zedPath, ConfigName: "context_servers"}) + } + + if configIsArray { + // Add custom configuration paths + for _, path := range flag.GetStringArray(ctx, "config") { + path, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("failed to get absolute path for %s: %w", path, err) + } + log.Debugf("Adding custom configuration path: %s", path) + paths = append(paths, ConfigPath{Path: path}) + } + } else { + path := flag.GetString(ctx, "config") + if path != "" { + path, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("failed to get absolute path for %s: %w", path, err) + } + log.Debugf("Adding custom configuration path: %s", path) + paths = append(paths, ConfigPath{Path: path}) + } + } + + for i, configPath := range paths { + if configPath.ConfigName == "" { + paths[i].ConfigName = "mcpServers" + } + } + + return paths, nil +} + +func ServerMap(configPaths []ConfigPath) (map[string]any, 
error) {
+	// build a server map from all of the configs
+	serverMap := make(map[string]any)
+
+	for _, configPath := range configPaths {
+		// if the configuration file doesn't exist, skip it
+		if _, err := os.Stat(configPath.Path); err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, err
+		}
+
+		// read the configuration file
+		file, err := os.Open(configPath.Path)
+		if err != nil {
+			return nil, err
+		}
+		defer file.Close()
+
+		// parse the configuration file as JSON
+		var data map[string]any
+		decoder := json.NewDecoder(file)
+		if err := decoder.Decode(&data); err != nil {
+			return nil, fmt.Errorf("failed to parse %s: %w", configPath.Path, err)
+		}
+
+		if mcpServers, ok := data[configPath.ConfigName].(map[string]any); ok {
+			// add metadata about the tool
+			config := make(map[string]any)
+			config["mcpServers"] = mcpServers
+			config["configName"] = configPath.ConfigName
+
+			if configPath.ToolName != "" {
+				config["toolName"] = configPath.ToolName
+			}
+
+			serverMap[configPath.Path] = config
+
+			// add metadata about each MCP server
+			for name := range mcpServers {
+				if serverMap, ok := mcpServers[name].(map[string]any); ok {
+					server, err := configExtract(configPath, name)
+					if err != nil {
+						return nil, fmt.Errorf("failed to extract config for %s: %w", name, err)
+					}
+
+					for key, value := range server {
+						if key != "command" && key != "args" {
+							serverMap[key] = value
+						}
+					}
+
+					mcpServers[name] = serverMap
+				}
+			}
+		}
+	}
+
+	return serverMap, nil
+}
+
+func SelectServerAndConfig(ctx context.Context, configIsArray bool) (string, []ConfigPath, error) {
+	server := flag.GetString(ctx, "server")
+
+	// Check if the user has specified any client flags
+	configSelected := false
+	for client := range McpClients {
+		configSelected = configSelected || flag.GetBool(ctx, client)
+	}
+
+	// if no client is selected, select all clients
+	if !configSelected {
+		for client := range McpClients {
+			flag.SetString(ctx, client, "true")
+		}
+	}
+
+	// Get a list of config paths
+	configPaths, err := ListConfigPaths(ctx, true)
+	if err != nil {
+		return "", nil, err
+	}
+
+	var serverMap map[string]any
+
+	if len(configPaths) > 1 || server == "" {
+		serverMap, err = ServerMap(configPaths)
+		if err != nil {
+			return "", nil, fmt.Errorf("failed to get server map: %w", err)
+		}
+	}
+
+	if len(configPaths) == 0 {
+		return "", nil, errors.New("no configuration paths found")
+	} else if len(configPaths) > 1 && !configIsArray {
+		choices := make([]string, 0)
+		choiceMap := make(map[int]int)
+		for i, configPath := range configPaths {
+			if config, ok := serverMap[configPath.Path].(map[string]any); ok {
+				if servers, ok := config["mcpServers"].(map[string]any); ok && len(servers) > 0 {
+					if toolName, ok := config["toolName"].(string); ok {
+						choices = append(choices, toolName)
+					} else {
+						choices = append(choices, configPath.Path)
+					}
+					choiceMap[i] = len(choices) - 1
+				}
+			}
+		}
+
+		index := 0
+		if len(choices) == 0 {
+			return "", nil, errors.New("no MCP servers found in the selected configuration files")
+		} else if len(choices) > 1 {
+			switch err = prompt.Select(ctx, &index, "Select a configuration file", "", choices...); {
+			case err == nil:
+				if choiceIndex, ok := choiceMap[index]; ok {
+					index = choiceIndex
+				}
+			case prompt.IsNonInteractive(err):
+				return "", nil, prompt.NonInteractiveError("MCP client or config file must be specified when not running interactively")
+			default:
+				return "", nil, fmt.Errorf("failed to select configuration file: %w", err)
+			}
+
+		}
+
+		configPaths = 
[]ConfigPath{configPaths[index]} + } + + if server == "" { + if len(serverMap) == 0 { + return "", configPaths, errors.New("no MCP servers found in the selected configuration files") + } + // Select a server from the server map + var index int + choices := make([]string, 0) + for _, configPath := range serverMap { + if config, ok := configPath.(map[string]any); ok { + if servers, ok := config["mcpServers"].(map[string]any); ok { + for name := range servers { + choices = append(choices, name) + } + } + } + } + + if len(choices) == 0 { + return "", configPaths, errors.New("no MCP servers found in the selected configuration files") + } else if len(choices) == 1 { + server = choices[0] + log.Debugf("Only one MCP server found: %s", server) + } else { + switch err = prompt.Select(ctx, &index, "Select a MCP server", "", choices...); { + case err == nil: + server = choices[index] + log.Debugf("Selected MCP server: %s", server) + case prompt.IsNonInteractive(err): + return "", nil, prompt.NonInteractiveError("server must be specified when not running interactively") + default: + return "", configPaths, fmt.Errorf("failed to select MCP server: %w", err) + } + } + } + + return server, configPaths, nil +} + +// UpdateConfig updates the configuration at the specified path with the MCP servers +func UpdateConfig(ctx context.Context, path string, configKey string, server string, command string, args []string) error { + log.Debugf("Updating configuration at %s", path) + + if configKey == "" { + configKey = "mcpServers" + } + + // Create directory if it doesn't exist + dir := filepath.Dir(path) + err := os.MkdirAll(dir, 0755) + if err != nil { + return fmt.Errorf("failed to create directory %s: %w", dir, err) + } + + // Initialize configuration data map + configData := make(map[string]interface{}) + + // Read existing configuration if it exists + fileExists := false + fileData, err := os.ReadFile(path) + if err == nil { + fileExists = true + // File exists, parse it + err = json.Unmarshal(fileData, &configData) + if err != nil { + return fmt.Errorf("failed to parse existing configuration at %s: %w", path, err) + } else { + log.Debugf("Successfully read existing configuration at %s", path) + } + } else { + log.Debugf("Configuration file doesn't exist, will create a new one") + } + + // Get or create mcpServers field in config + var mcpServers map[string]interface{} + + if mcpServersRaw, exists := configData[configKey]; exists { + if mcpMap, ok := mcpServersRaw.(map[string]interface{}); ok { + mcpServers = mcpMap + log.Debugf("Found existing %s with %d entries", configKey, len(mcpServers)) + } else { + return fmt.Errorf("%s field exists in %s but is not a map", configKey, path) + } + } else { + log.Debugf("No %s field found, creating a new one", configKey) + mcpServers = make(map[string]interface{}) + } + + // Merge the new MCP server with existing ones + if _, exists := mcpServers[server]; exists { + log.Debugf("Replacing existing MCP server: %s", server) + } else { + log.Debugf("Adding new MCP server: %s", server) + } + + // Build the server map + serverMap := map[string]interface{}{ + "command": command, + "args": args, + } + + // Update the server in the existing map + mcpServers[server] = serverMap + + // Update the mcpServers field in the config + configData[configKey] = mcpServers + + // Write the updated configuration + updatedData, err := json.MarshalIndent(configData, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal updated configuration: %w", err) + } + + err = os.WriteFile(path, 
updatedData, 0644) + if err != nil { + return fmt.Errorf("Failed to write updated configuration to %s: %v", path, err) + } + + if fileExists { + log.Debugf("Successfully updated existing configuration at %s", path) + } else { + log.Debugf("Successfully created new configuration at %s", path) + } + + return nil +} + +func runRemove(ctx context.Context) error { + var err error + + server, configPaths, err := SelectServerAndConfig(ctx, false) + if err != nil { + return err + } + + for _, configPath := range configPaths { + err = removeConfig(ctx, configPath.Path, configPath.ConfigName, server) + if err != nil { + return fmt.Errorf("failed to update configuration at %s: %w", configPath.Path, err) + } + } + + return nil +} + +// removeConfig removes the MCP server from the configuration at the specified path +func removeConfig(ctx context.Context, path string, configKey string, name string) error { + log := logger.FromContext(ctx) + + log.Debugf("Removing from configuration at %s", path) + + // Read existing configuration if it exists + fileData, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read configuration at %s: %w", path, err) + } + + // Parse the existing configuration + configData := make(map[string]interface{}) + err = json.Unmarshal(fileData, &configData) + if err != nil { + return fmt.Errorf("failed to parse existing configuration at %s: %w", path, err) + } else { + log.Debugf("Successfully read existing configuration at %s", path) + } + + // Get the mcpServers field in config + var mcpServers map[string]interface{} + if mcpServersRaw, exists := configData[configKey]; exists { + if mcpMap, ok := mcpServersRaw.(map[string]interface{}); ok { + mcpServers = mcpMap + log.Debugf("Found existing %s with %d entries", configKey, len(mcpServers)) + } else { + return fmt.Errorf("%s field exists in %s but is not a map", configKey, path) + } + } else { + log.Warnf("No %s field found, nothing to remove", configKey) + return nil + } + + // Remove the MCP server from the existing map + if _, exists := mcpServers[name]; exists { + log.Debugf("Removing existing MCP server: %s", name) + delete(mcpServers, name) + } else { + log.Warnf("MCP server %s not found, nothing to remove", name) + return nil + } + + // Update the mcpServers field in the config + configData[configKey] = mcpServers + + // Write the updated configuration + updatedData, err := json.MarshalIndent(configData, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal updated configuration: %w", err) + } + + err = os.WriteFile(path, updatedData, 0644) + if err != nil { + return fmt.Errorf("Failed to write updated configuration to %s: %v", path, err) + } + + log.Debugf("Successfully updated existing configuration at %s", path) + return nil +} + +// Server represents a server configuration in the JSON file +type MCPServer struct { + Args []string `json:"args"` + Command string `json:"command"` +} + +func configExtract(config ConfigPath, server string) (map[string]interface{}, error) { + // Check if the file exists + // Read the configuration file + data, err := os.ReadFile(config.Path) + if err != nil { + return nil, fmt.Errorf("Error reading file: %v", err) + } + + // Parse the JSON data + jsonConfig := make(map[string]interface{}) + if err := json.Unmarshal(data, &jsonConfig); err != nil { + return nil, fmt.Errorf("Error parsing JSON: %v", err) + } + + jsonServers, ok := jsonConfig[config.ConfigName].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("Error finding MCP server configuration: 
%v", err) + } + + serverConfig, ok := jsonServers[server].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("Error finding MCP server configuration: %v", err) + } + + args, ok := serverConfig["args"].([]interface{}) + + if ok { + for i, arg := range args { + if arg == "--bearer-token" && i+1 < len(args) { + serverConfig["bearer-token"] = args[i+1] + } + + if arg == "--url" && i+1 < len(args) { + appUrl := args[i+1] + serverConfig["url"] = appUrl + + if appUrlStr, ok := appUrl.(string); ok { + parsedURL, err := url.Parse(appUrlStr) + if err == nil { + hostnameParts := strings.Split(parsedURL.Hostname(), ".") + if len(hostnameParts) > 2 && hostnameParts[len(hostnameParts)-1] == "dev" && hostnameParts[len(hostnameParts)-2] == "fly" { + serverConfig["app"] = hostnameParts[len(hostnameParts)-3] + } else if len(hostnameParts) > 1 && hostnameParts[len(hostnameParts)-1] == "flycast" { + serverConfig["app"] = hostnameParts[len(hostnameParts)-2] + } else if len(hostnameParts) > 1 && hostnameParts[len(hostnameParts)-1] == "internal" { + serverConfig["app"] = hostnameParts[len(hostnameParts)-2] + } + } + } + } + } + } + + return serverConfig, nil +} diff --git a/internal/command/mcp/destroy.go b/internal/command/mcp/destroy.go new file mode 100644 index 0000000000..c9bb17b886 --- /dev/null +++ b/internal/command/mcp/destroy.go @@ -0,0 +1,138 @@ +package mcp + +import ( + "context" + "fmt" + "strings" + + "github.com/apex/log" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flyutil" +) + +func NewDestroy() *cobra.Command { + const ( + short = "[experimental] Destroy an MCP stdio server" + long = short + "\n" + usage = "destroy" + ) + + cmd := command.New(usage, short, long, runDestroy, command.LoadAppNameIfPresent) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.App(), + + flag.String{ + Name: "server", + Description: "Name of the MCP server in the MCP client configuration", + }, + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file", + }, + flag.Bool{ + Name: "yes", + Description: "Accept all confirmations", + Shorthand: "y", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Remove MCP server from to the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func runDestroy(ctx context.Context) error { + appName := appconfig.NameFromContext(ctx) + + if appName == "" { + server, configPaths, err := SelectServerAndConfig(ctx, true) + if err != nil { + return err + } + + if len(configPaths) == 0 { + return fmt.Errorf("No app name or MCP client configuration file provided") + } + + mcpConfig, err := configExtract(configPaths[0], server) + if err != nil { + return err + } + + var ok bool + appName, ok = mcpConfig["app"].(string) + if !ok { + return fmt.Errorf("No app name found in MCP client configuration") + } + } + + client := flyutil.ClientFromContext(ctx) + _, err := client.GetApp(ctx, appName) + if err != nil { + return fmt.Errorf("app not found: %w", err) + } + + // Destroy the app + args := []string{"apps", "destroy", appName} + + if flag.GetBool(ctx, "yes") { + args = append(args, "--yes") + } + + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to destroy app': %w", err) + } + + _, err = client.GetApp(ctx, appName) + if err == nil { + return fmt.Errorf("app not destroyed: %s", 
appName) + } + + args = []string{} + + // Remove the MCP server to the MCP client configurations + for client := range McpClients { + if flag.GetBool(ctx, client) { + args = append(args, "--"+client) + } + } + + for _, config := range flag.GetStringArray(ctx, "config") { + if config != "" { + log.Debugf("Removing %s from the MCP client configuration", config) + args = append(args, "--config", config) + } + } + + if len(args) > 0 { + args = append([]string{"mcp", "remove"}, args...) + + if app := flag.GetString(ctx, "app"); app != "" { + args = append(args, "--app", app) + } + if server := flag.GetString(ctx, "server"); server != "" { + args = append(args, "--server", server) + } + + // Run 'fly mcp remove ...' + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly mcp remove': %w", err) + } + + log.Debug(strings.Join(args, " ")) + } + + return nil +} diff --git a/internal/command/mcp/launch.go b/internal/command/mcp/launch.go new file mode 100644 index 0000000000..9112370c4e --- /dev/null +++ b/internal/command/mcp/launch.go @@ -0,0 +1,448 @@ +package mcp + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/shlex" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/cmdutil" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/logger" +) + +func NewLaunch() *cobra.Command { + const ( + short = "[experimental] Launch an MCP stdio server" + long = short + "\n" + usage = "launch command" + ) + cmd := command.New(usage, short, long, runLaunch) + cmd.Args = cobra.MaximumNArgs(1) + + flag.Add(cmd, + flag.String{ + Name: "name", + Description: "Suggested name for the app", + }, + flag.String{ + Name: "server", + Description: "Name to use for the MCP server in the MCP client configuration", + }, + flag.String{ + Name: "user", + Description: "User to authenticate with", + }, + flag.String{ + Name: "password", + Description: "Password to authenticate with", + }, + flag.Bool{ + Name: "bearer-token", + Description: "Use bearer token for authentication", + Default: true, + }, + flag.Bool{ + Name: "flycast", + Description: "Use wireguard and flycast for access", + }, + flag.Bool{ + Name: "inspector", + Description: "Launch MCP inspector: a developer tool for testing and debugging MCP servers", + Default: false, + Shorthand: "i", + }, + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.String{ + Name: "auto-stop", + Description: "Automatically suspend the app after a period of inactivity. Valid values are 'off', 'stop', and 'suspend'", + Default: "suspend", + }, + flag.StringArray{ + Name: "secret", + Description: "Set of secrets in the form of NAME=VALUE pairs. Can be specified multiple times.", + }, + flag.StringArray{ + Name: "file-local", + Description: "Set of files in the form of /path/inside/machine= pairs. Can be specified multiple times.", + }, + flag.StringArray{ + Name: "file-literal", + Description: "Set of literals in the form of /path/inside/machine=VALUE pairs where VALUE is the content. Can be specified multiple times.", + }, + flag.StringArray{ + Name: "file-secret", + Description: "Set of secrets in the form of /path/inside/machine=SECRET pairs where SECRET is the name of the secret. 
Can be specified multiple times.", + }, + flag.String{ + Name: "region", + Shorthand: "r", + Description: "The target region. By default, the new volume will be created in the source volume's region.", + }, + flag.String{ + Name: "org", + Description: `The organization that will own the app`, + }, + flag.StringSlice{ + Name: "volume", + Shorthand: "v", + Description: "Volume to mount, in the form of :/path/inside/machine[:]", + }, + flag.String{ + Name: "image", + Description: "The image to use for the app", + }, + flag.StringSlice{ + Name: "setup", + Description: "Additional setup commands to run before launching the MCP server", + }, + flag.VMSizeFlags, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Add MCP server to the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func runLaunch(ctx context.Context) error { + log := logger.FromContext(ctx) + + image := flag.GetString(ctx, "image") + + // Parse the command + command := flag.FirstArg(ctx) + cmdParts, err := shlex.Split(command) + if err != nil { + return fmt.Errorf("failed to parse command: %w", err) + } else if len(cmdParts) == 0 && image == "" { + return fmt.Errorf("missing command or image to run") + } + + setup := flag.GetStringSlice(ctx, "setup") + if len(setup) > 0 && image == "" { + image = "flyio/mcp" + } + + // extract the entrypoint from the image + entrypoint := []string{} + if image != "" { + ref, err := name.ParseReference(image) + if err != nil { + return fmt.Errorf("failed to parse image reference: %w", err) + } + img, err := remote.Image(ref) + if err != nil { + return fmt.Errorf("failed to find image: %w", err) + } + cfg, err := img.ConfigFile() + if err != nil { + return fmt.Errorf("failed to get image config: %w", err) + } + entrypoint = cfg.Config.Entrypoint + + if len(cmdParts) == 0 { + cmdParts = cfg.Config.Cmd + } + } + + // determine the name of the MCP server + serverName := flag.GetString(ctx, "server") + if serverName == "" { + serverName = flag.GetString(ctx, "name") + } + + if serverName == "" { + serverName = "fly-mcp" + + ignoreWords := []string{"npx", "uvx", "-y", "--yes", "go", "run"} + + for _, w := range cmdParts { + if !slices.Contains(ignoreWords, w) { + if at := strings.Index(w, "@"); at != -1 { + w = w[:at] + } + + re := regexp.MustCompile(`[-\w]+`) + split := re.FindAllString(w, -1) + + if len(split) > 0 { + serverName = split[len(split)-1] + break + } + } + } + } + + // Create a temporary directory + tempDir, err := os.MkdirTemp("", "fly-mcp-*") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tempDir) + + log.Debugf("Created temporary directory: %s\n", tempDir) + + appName := flag.GetString(ctx, "name") + if appName == "" { + appName = serverName + } + + appDir := filepath.Join(tempDir, appName) + if err := os.MkdirAll(appDir, 0755); err != nil { + return fmt.Errorf("failed to create app directory: %w", err) + } + + log.Debugf("Created app directory: %s\n", appDir) + + if err := os.Chdir(appDir); err != nil { + return fmt.Errorf("failed to change to app directory: %w", err) + } + + args := []string{"launch", "--yes", "--no-deploy"} + + if cmdParts[0] == "go" && image == "" { + image = "golang:latest" + } + + if image != "" { + dockerfile := []string{"FROM " + image} + + if image != "flyio/mcp" { + dockerfile = append(dockerfile, "COPY --from=flyio/flyctl /flyctl /usr/bin/flyctl") + entrypoint = append([]string{"/usr/bin/flyctl", "mcp", "wrap", "--"}, 
entrypoint...) + } + + dockerfile = append(dockerfile, setup...) + + jsonData, err := json.Marshal(entrypoint) + if err != nil { + return fmt.Errorf("failed to marshal entrypoint to JSON: %w", err) + } + dockerfile = append(dockerfile, "ENTRYPOINT "+string(jsonData)) + + if len(cmdParts) > 0 { + jsonData, err := json.Marshal(cmdParts) + if err != nil { + return fmt.Errorf("failed to marshal command parts to JSON: %w", err) + } + + dockerfile = append(dockerfile, "CMD "+string(jsonData)) + } + + dockerfileContent := strings.Join(dockerfile, "\n") + "\n" + + fmt.Println(dockerfileContent) + + if err := os.WriteFile(filepath.Join(appDir, "Dockerfile"), []byte(dockerfileContent), 0644); err != nil { + return fmt.Errorf("failed to create Dockerfile: %w", err) + } + + log.Debug("Created Dockerfile") + } else { + args = append(args, "--command", command, "--image", "flyio/mcp") + } + + if flycast := flag.GetBool(ctx, "flycast"); flycast { + args = append(args, "--flycast") + } + + if autoStop := flag.GetString(ctx, "auto-stop"); autoStop != "" { + args = append(args, "--auto-stop", autoStop) + } + + if region := flag.GetString(ctx, "region"); region != "" { + args = append(args, "--region", region) + } + + if org := flag.GetString(ctx, "org"); org != "" { + args = append(args, "--org", org) + } + + if vmCpuKind := flag.GetString(ctx, "vm-cpu-kind"); vmCpuKind != "" { + args = append(args, "--vm-cpu-kind", vmCpuKind) + } + + if vmCpus := flag.GetInt(ctx, "vm-cpus"); vmCpus != 0 { + args = append(args, "--vm-cpus", fmt.Sprintf("%d", vmCpus)) + } + + if vmGpuKind := flag.GetString(ctx, "vm-gpu-kind"); vmGpuKind != "" { + args = append(args, "--vm-gpu-kind", vmGpuKind) + } + + if vmGpus := flag.GetInt(ctx, "vm-gpus"); vmGpus != 0 { + args = append(args, "--vm-gpus", fmt.Sprintf("%d", vmGpus)) + } + + if vmMemory := flag.GetString(ctx, "vm-memory"); vmMemory != "" { + args = append(args, "--vm-memory", vmMemory) + } + + if vmSize := flag.GetString(ctx, "vm-size"); vmSize != "" { + args = append(args, "--vm-size", vmSize) + } + + if hostDedicationId := flag.GetString(ctx, "host-dedication-id"); hostDedicationId != "" { + args = append(args, "--host-dedication-id", hostDedicationId) + } + + volumes := flag.GetStringSlice(ctx, "volume") + if len(volumes) > 0 { + args = append(args, "--volume", strings.Join(volumes, ",")) + } + + // Run fly launch, but don't deploy + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly launch': %w", err) + } + + log.Debug("Launched fly application") + + args = []string{} + + // Add the MCP server to the MCP client configurations + for client := range McpClients { + if flag.GetBool(ctx, client) { + log.Debugf("Adding %s to MCP client configuration", client) + args = append(args, "--"+client) + } + } + + for _, config := range flag.GetStringArray(ctx, "config") { + if config != "" { + log.Debugf("Adding %s to MCP client configuration", config) + args = append(args, "--config", config) + } + } + + tmpConfig := filepath.Join(tempDir, "mcpConfig.json") + if flag.GetBool(ctx, "inspector") { + // If the inspector flag is set, capture the MCP client configuration + log.Debug("Adding inspector to MCP client configuration") + args = append(args, "--config", tmpConfig) + } + + if len(args) == 0 { + log.Debug("No MCP client configuration flags provided") + } else { + args = append([]string{"mcp", "add"}, args...) 
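+ // Forward the chosen server name and any auth flags so 'fly mcp add' registers
+ // the freshly launched app in each selected MCP client configuration.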
+ args = append(args, "--name", serverName) + + if user := flag.GetString(ctx, "user"); user != "" { + args = append(args, "--user", user) + } + + if password := flag.GetString(ctx, "password"); password != "" { + args = append(args, "--password", password) + } + + if bearer := flag.GetBool(ctx, "bearer-token"); bearer { + args = append(args, "--bearer-token") + } + + if flycast := flag.GetBool(ctx, "flycast"); flycast { + args = append(args, "--flycast") + } + + // Run 'fly mcp add ...' + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly mcp add': %w", err) + } + } + + // Add secrets to the app + if secrets := flag.GetStringArray(ctx, "secret"); len(secrets) > 0 { + parsedSecrets, err := cmdutil.ParseKVStringsToMap(secrets) + if err != nil { + return fmt.Errorf("failed parsing secrets: %w", err) + } + + args = []string{"secrets", "set"} + for k, v := range parsedSecrets { + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + + // Run 'fly secrets set ...' + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly secrets set': %w", err) + } + } + + args = []string{"deploy", "--ha=false"} + + for _, file := range flag.GetStringArray(ctx, "file-local") { + if file != "" { + args = append(args, "--file-local", file) + } + } + + for _, file := range flag.GetStringArray(ctx, "file-literal") { + if file != "" { + args = append(args, "--file-literal", file) + } + } + + for _, file := range flag.GetStringArray(ctx, "file-secret") { + if file != "" { + args = append(args, "--file-secret", file) + } + } + + // Deploy to a single machine + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly launch': %w", err) + } + + log.Debug("Successfully completed MCP server launch and configuration") + + // If the inspector flag is set, run the MCP inspector + if flag.GetBool(ctx, "inspector") { + server, err := configExtract(ConfigPath{Path: tmpConfig, ConfigName: "mcpServers"}, serverName) + if err != nil { + return fmt.Errorf("failed to extract config: %w", err) + } + + args := []string{"@modelcontextprotocol/inspector@latest"} + args = append(args, server["command"].(string)) + + // Convert []interface{} to []string + rawArgs, _ := server["args"].([]interface{}) + for _, v := range rawArgs { + if s, ok := v.(string); ok { + args = append(args, s) + } + } + + inspectorCmd := exec.Command("npx", args...) 
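+ // Run the inspector in the foreground, inheriting the environment and stdio so
+ // it can drive the proxy command extracted from the temporary MCP config above.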
+ inspectorCmd.Env = os.Environ() + inspectorCmd.Stdout = os.Stdout + inspectorCmd.Stderr = os.Stderr + inspectorCmd.Stdin = os.Stdin + if err := inspectorCmd.Run(); err != nil { + return fmt.Errorf("failed to run MCP inspector: %w", err) + } + log.Debug("MCP inspector launched") + } + + return nil +} diff --git a/internal/command/mcp/list.go b/internal/command/mcp/list.go new file mode 100644 index 0000000000..8a688c0554 --- /dev/null +++ b/internal/command/mcp/list.go @@ -0,0 +1,170 @@ +package mcp + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +func newList() *cobra.Command { + const ( + short = "[experimental] List MCP servers" + long = short + "\n" + usage = "list" + ) + + cmd := command.New(usage, short, long, runList) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.App(), + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.Bool{ + Name: "json", + Description: "Output in JSON format", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "List MCP servers from the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func runList(ctx context.Context) error { + // Check if the user has specified any client flags + configSelected := false + for client := range McpClients { + configSelected = configSelected || flag.GetBool(ctx, client) + } + + // if no cllent is selected, select all clients + if !configSelected { + for client := range McpClients { + flag.SetString(ctx, client, "true") + } + } + + // Get a list of config paths + configPaths, err := ListConfigPaths(ctx, true) + if err != nil { + return err + } + + // build a server map from all of the configs + serverMap := make(map[string]any) + + for _, configPath := range configPaths { + // if the configuration file doesn't exist, skip it + if _, err := os.Stat(configPath.Path); err != nil { + if os.IsNotExist(err) { + continue + } + return err + } + + // read the configuration file + file, err := os.Open(configPath.Path) + if err != nil { + return err + } + defer file.Close() + + // parse the configuration file as JSON + var data map[string]any + decoder := json.NewDecoder(file) + if err := decoder.Decode(&data); err != nil { + return fmt.Errorf("failed to parse %s: %w", configPath.Path, err) + } + + if mcpServers, ok := data[configPath.ConfigName].(map[string]any); ok { + // add metadata about the tool + config := make(map[string]any) + config["mcpServers"] = mcpServers + config["configName"] = configPath.ConfigName + + if configPath.ToolName != "" { + config["toolName"] = configPath.ToolName + } + + serverMap[configPath.Path] = config + + // add metadata about each MCP server + for name := range mcpServers { + if serverMap, ok := mcpServers[name].(map[string]any); ok { + server, err := configExtract(configPath, name) + if err != nil { + return fmt.Errorf("failed to extract config for %s: %w", name, err) + } + + for key, value := range server { + if key != "command" && key != "args" { + serverMap[key] = value + } + } + + mcpServers[name] = serverMap + } + } + } + } + + // if the user has specified the --json flag, output the server map as JSON + if flag.GetBool(ctx, "json") { + output, err := json.MarshalIndent(serverMap, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal server map: %w", err) + } + 
fmt.Println(string(output)) + return nil + } + + // if no MCP servers were found, print a message and return + if len(serverMap) == 0 { + fmt.Println("No MCP servers found.") + return nil + } + + // print the server map in a human-readable format + for pathName, configPath := range serverMap { + fmt.Printf("Config Path: %s\n", pathName) + if config, ok := configPath.(map[string]any); ok { + if toolName, ok := config["toolName"].(string); ok { + fmt.Printf(" Tool Name: %s\n", toolName) + } + + if servers, ok := config["mcpServers"].(map[string]any); ok { + for name := range servers { + fmt.Printf(" MCP Server: %v\n", name) + + server, ok := servers[name].(map[string]any) + + if ok { + for key, value := range server { + if key != "command" && key != "args" { + fmt.Printf(" %s: %v\n", key, value) + } + } + } + } + } + } + fmt.Println() + } + + return nil +} diff --git a/internal/command/mcp/logs.go b/internal/command/mcp/logs.go new file mode 100644 index 0000000000..0284de8cc1 --- /dev/null +++ b/internal/command/mcp/logs.go @@ -0,0 +1,85 @@ +package mcp + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +func newLogs() *cobra.Command { + const ( + short = "[experimental] Show log for an MCP server" + long = short + "\n" + usage = "logs" + ) + + cmd := command.New(usage, short, long, runLogs) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.App(), + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.String{ + Name: "server", + Description: "Name of the MCP server to show logs for", + }, + flag.Bool{ + Name: "json", + Description: "Output in JSON format", + }, + flag.Bool{ + Name: "no-tail", + Shorthand: "n", + Description: "Do not continually stream logs", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Select MCP server from the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func runLogs(ctx context.Context) error { + name, configPaths, err := SelectServerAndConfig(ctx, false) + if err != nil { + return err + } + + server, err := configExtract(configPaths[0], name) + if err != nil { + return err + } + + if app, ok := server["app"].(string); ok { + args := []string{"logs", "--app", app} + + if flag.GetBool(ctx, "json") { + args = append(args, "--json") + } + + if flag.GetBool(ctx, "no-tail") { + args = append(args, "--no-tail") + } + + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to run 'fly logs': %w", err) + } + } else { + return fmt.Errorf("MCP server %s does not have an app", name) + } + + return nil +} diff --git a/internal/command/mcp/mcp.go b/internal/command/mcp/mcp.go new file mode 100644 index 0000000000..71f29daf36 --- /dev/null +++ b/internal/command/mcp/mcp.go @@ -0,0 +1,58 @@ +package mcp + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/apex/log" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" +) + +func New() *cobra.Command { + const ( + short = `flyctl Model Context Protocol.` + + long = short + "\n" + ) + + cmd := command.New("mcp", short, long, nil) + // cmd.Hidden = true + + cmd.AddCommand( + NewProxy(), + NewInspect(), + newServer(), + NewWrap(), + + NewAdd(), + NewRemove(), + + NewLaunch(), + NewDestroy(), + + newVolume(), + newList(), + newLogs(), + ) + + return cmd +} + +func flyctl(args ...string) error { + 
executable, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to find executable: %w", err) + } + + log.Debugf("Running:", executable, strings.Join(args, " ")) + + cmd := exec.Command(executable, args...) + cmd.Env = os.Environ() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + return cmd.Run() +} diff --git a/internal/command/mcp/proxy.go b/internal/command/mcp/proxy.go new file mode 100644 index 0000000000..c9268c7f0e --- /dev/null +++ b/internal/command/mcp/proxy.go @@ -0,0 +1,376 @@ +package mcp + +import ( + "context" + "fmt" + "log" + "net" + "net/url" + "os" + "os/exec" + "strings" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + mcpProxy "github.com/superfly/flyctl/internal/command/mcp/proxy" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/flagnames" +) + +var sharedProxyFlags = flag.Set{ + flag.App(), + + flag.String{ + Name: "url", + Description: "URL of the MCP wrapper server", + }, + flag.String{ + Name: "bearer-token", + Description: "Bearer token to authenticate with", + }, + flag.String{ + Name: "user", + Description: "User to authenticate with", + Shorthand: "u", + }, + flag.String{ + Name: "password", + Description: "Password to authenticate with", + Shorthand: "p", + }, + flag.String{ + Name: flagnames.BindAddr, + Shorthand: "b", + Default: "127.0.0.1", + Description: "Local address to bind to", + }, + flag.String{ + Name: "instance", + Description: "Use fly-force-instance-id to connect to a specific instance", + }, + flag.Bool{ + Name: "sse", + Description: "Use Server-Sent Events (SSE) for the MCP connection", + }, + flag.Bool{ + Name: "stream", + Description: "Use streaming for the MCP connection", + }, + flag.Int{ + Name: "timeout", + Description: "Timeout in seconds for the MCP connection", + }, + flag.Bool{ + Name: "ping", + Description: "Enable ping for the MCP connection", + }, +} + +func NewProxy() *cobra.Command { + const ( + short = "[experimental] Start an MCP proxy client" + long = short + "\n" + usage = "proxy" + ) + + cmd := command.New(usage, short, long, runProxy, command.LoadAppNameIfPresent) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + sharedProxyFlags, + flag.Bool{ + Name: "inspector", + Description: "Launch MCP inspector: a developer tool for testing and debugging MCP servers", + Default: false, + Shorthand: "i", + }, + ) + + return cmd +} + +func NewInspect() *cobra.Command { + const ( + short = "[experimental] Inspect a MCP stdio server" + long = short + "\n" + usage = "inspect" + ) + + cmd := command.New(usage, short, long, runInspect, command.LoadAppNameIfPresent) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + sharedProxyFlags, + flag.String{ + Name: "server", + Description: "Name of the MCP server in the MCP client configuration", + }, + flag.String{ + Name: "config", + Description: "Path to the MCP client configuration file", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Use the configuration for " + name + " client", + }, + ) + } + + return cmd +} + +func getInfo(ctx context.Context) mcpProxy.ProxyInfo { + proxyInfo := mcpProxy.ProxyInfo{ + Url: flag.GetString(ctx, "url"), + BearerToken: flag.GetString(ctx, "bearer-token"), + User: flag.GetString(ctx, "user"), + Password: flag.GetString(ctx, "password"), + Instance: flag.GetString(ctx, "instance"), + Mode: "passthru", // Default mode is passthru + 
Timeout: flag.GetInt(ctx, "timeout"), + Ping: flag.GetBool(ctx, "ping"), + } + + if flag.GetBool(ctx, "sse") { + proxyInfo.Mode = "sse" + } else if flag.GetBool(ctx, "stream") { + proxyInfo.Mode = "stream" + } + + return proxyInfo +} + +func runProxy(ctx context.Context) error { + proxyInfo := getInfo(ctx) + + return runProxyOrInspect(ctx, proxyInfo, flag.GetBool(ctx, "inspector")) +} + +func runInspect(ctx context.Context) error { + proxyInfo := getInfo(ctx) + + server, configPaths, err := SelectServerAndConfig(ctx, false) + if err != nil { + return err + } + + if len(configPaths) == 1 { + mcpConfig, err := configExtract(configPaths[0], server) + if err != nil { + return err + } + + if proxyInfo.Url == "" { + proxyInfo.Url, _ = mcpConfig["url"].(string) + } + if proxyInfo.BearerToken == "" { + proxyInfo.BearerToken, _ = mcpConfig["bearer-token"].(string) + } + if proxyInfo.User == "" { + proxyInfo.User, _ = mcpConfig["user"].(string) + } + if proxyInfo.Password == "" { + proxyInfo.Password, _ = mcpConfig["password"].(string) + } + } else if len(configPaths) > 1 { + return fmt.Errorf("multiple MCP client configuration files specified. Please specify at most one") + } + + return runProxyOrInspect(ctx, proxyInfo, true) +} + +func runProxyOrInspect(ctx context.Context, proxyInfo mcpProxy.ProxyInfo, inspect bool) error { + + // If no URL is provided, try to get it from the app config + // If that fails, return an error + if proxyInfo.Url == "" { + appConfig := appconfig.ConfigFromContext(ctx) + + if appConfig != nil { + appUrl := appConfig.URL() + if appUrl != nil { + proxyInfo.Url = appUrl.String() + } + } + + if proxyInfo.Url == "" { + log.Fatal("The app config could not be found and no URL was provided") + } + } + + if inspect { + flyctl, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to find executable: %w", err) + } + + args := []string{"@modelcontextprotocol/inspector@latest", flyctl, "mcp", "proxy", "--url", proxyInfo.Url} + + if proxyInfo.BearerToken != "" { + args = append(args, "--bearer-token", proxyInfo.BearerToken) + } + if proxyInfo.User != "" { + args = append(args, "--user", proxyInfo.User) + } + if proxyInfo.Password != "" { + args = append(args, "--password", proxyInfo.Password) + } + if proxyInfo.Instance != "" { + args = append(args, "--instance", proxyInfo.Instance) + } + if proxyInfo.Mode == "sse" { + args = append(args, "--sse") + } else if proxyInfo.Mode == "stream" { + args = append(args, "--stream") + } + if proxyInfo.Timeout > 0 { + args = append(args, "--timeout", fmt.Sprintf("%d", proxyInfo.Timeout)) + } + if proxyInfo.Ping { + args = append(args, "--ping") + } + + // Launch MCP inspector + cmd := exec.Command("npx", args...) 
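+ // The inspector invokes 'flyctl mcp proxy' with the flags assembled above;
+ // hand it our stdio so its interactive UI works, and return once it exits.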
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to launch MCP inspector: %w", err) + } + return nil + } + + url, proxyCmd, err := resolveProxy(ctx, proxyInfo.Url) + if err != nil { + log.Fatalf("Error resolving proxy URL: %v", err) + } + + proxyInfo.Url = url + + // Configure logging to go to stderr only + log.SetOutput(os.Stderr) + + if flag.GetBool(ctx, "sse") { + proxyInfo.Mode = "sse" + } else if flag.GetBool(ctx, "stream") { + proxyInfo.Mode = "stream" + } + + if proxyInfo.Mode == "passthru" { + fmt.Fprintf(os.Stderr, "Starting MCP proxy passthru mode for URL: %s\n", proxyInfo.Url) + err = mcpProxy.Passthru(proxyInfo) + } else { + fmt.Fprintf(os.Stderr, "Starting MCP proxy in %s mode for URL: %s\n", proxyInfo.Mode, proxyInfo.Url) + err = mcpProxy.Replay(ctx, proxyInfo) + } + + if err != nil { + log.Fatal(err) + } + + // Kill the proxy process if it was started + if proxyCmd != nil { + if err := proxyCmd.Process.Kill(); err != nil { + log.Printf("Error killing proxy process: %v", err) + } + proxyCmd.Wait() + } + + return nil +} + +// resolveProxy starts the proxy process and returns the new URL +func resolveProxy(ctx context.Context, originalUrl string) (string, *exec.Cmd, error) { + appName := flag.GetString(ctx, "app") + + parsedURL, err := url.Parse(originalUrl) + if err != nil { + return "", nil, fmt.Errorf("error parsing URL: %w", err) + } + + // If the app name is not provided, try to extract it from the URL + if appName == "" { + hostname := parsedURL.Hostname() + if strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".flycast") { + // Split the hostname by dots + parts := strings.Split(hostname, ".") + + // The app name should be the part before the last segment (internal or flycast) + if len(parts) >= 2 { + appName = parts[len(parts)-2] + } else { + return originalUrl, nil, nil + } + } else { + return originalUrl, nil, nil + } + } + + if parsedURL.Scheme != "http" { + return "", nil, fmt.Errorf("unsupported URL scheme: %s", parsedURL.Scheme) + } + + // get an available port on the local machine + localPort, err := getAvailablePort() + if err != nil { + return "", nil, fmt.Errorf("error getting available port: %w", err) + } + + remoteHost := parsedURL.Hostname() + + remotePort := parsedURL.Port() + if remotePort == "" { + if parsedURL.Scheme == "http" { + remotePort = "80" + } else if parsedURL.Scheme == "https" { + remotePort = "443" + } + } + + ports := fmt.Sprintf("%d:%s", localPort, remotePort) + + flyctl, err := os.Executable() + if err != nil { + return "", nil, fmt.Errorf("failed to find executable: %w", err) + } + + cmd := exec.Command(flyctl, "proxy", ports, remoteHost, "--quiet", "--app", appName) + cmd.Stdin = nil + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + + if err := cmd.Start(); err != nil { + log.Fatalf("Error running subprocess: %v", err) + } + + bindAddr := flag.GetBindAddr(ctx) + + parsedURL.Host = fmt.Sprintf("%s:%d", bindAddr, localPort) + + return parsedURL.String(), cmd, nil +} + +// getAvailablePort finds an available port on the local machine +func getAvailablePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + + if err != nil { + return 0, err + } + + listener, err := net.ListenTCP("tcp", addr) + + if err != nil { + return 0, err + } + + defer listener.Close() + + return listener.Addr().(*net.TCPAddr).Port, nil +} diff --git a/internal/command/mcp/proxy/passthru.go b/internal/command/mcp/proxy/passthru.go new 
file mode 100644 index 0000000000..f7279f078d --- /dev/null +++ b/internal/command/mcp/proxy/passthru.go @@ -0,0 +1,214 @@ +package mcpProxy + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "net/http" + "os" + "os/signal" + "strings" + "sync" + "syscall" + "time" +) + +func Passthru(proxyInfo ProxyInfo) error { + err := waitForServer(proxyInfo) + if err != nil { + return fmt.Errorf("error waiting for server: %w", err) + } + + // Store whether the SSE connection is ready + // This may become unready if the connection is closed + ready := false + readyMutex := sync.Mutex{} + readyCond := sync.NewCond(&readyMutex) + + // Start the HTTP client + go func() { + start := time.Now() + for { + getFromServer(proxyInfo, &ready, readyCond) + + // Ready should be set to false when the connection is closed + readyCond.L.Lock() + ready = false + readyCond.Broadcast() + readyCond.L.Unlock() + + // Wait a minimum of 10 seconds before the next request + elapsed := time.Since(start) + if elapsed < 10*time.Second { + time.Sleep(10*time.Second - elapsed) + } + start = time.Now() + } + }() + + // Start processing stdin + if err := processStdin(proxyInfo, &ready, readyCond); err != nil { + return fmt.Errorf("error processing stdin: %w", err) + } + + return nil +} + +// waitForServer waits for the server to be up and running +func waitForServer(proxyInfo ProxyInfo) error { + // Continue to post nothing until the server is up + delay := 100 * time.Millisecond + var err error + for delay < 60*time.Second { + err = sendToServer("", proxyInfo) + + if err == nil { + break + } else if !strings.Contains(err.Error(), "connection refused") { + log.Printf("Error sending message to server: %v", err) + break + } + + time.Sleep(delay) + delay *= 2 + } + + return err +} + +// ProcessStdin reads messages from stdin and forwards them to the server +func processStdin(proxyInfo ProxyInfo, ready *bool, readyCond *sync.Cond) error { + stp := make(chan os.Signal, 1) + signal.Notify(stp, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stp + os.Exit(0) + }() + + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + line := scanner.Text() + "\n" + + // Skip empty lines + if strings.TrimSpace(line) == "" { + continue + } + + // Wait for the server to be ready + readyCond.L.Lock() + for !*ready { + readyCond.Wait() + } + readyCond.L.Unlock() + + // Forward raw message to server + err := sendToServer(line, proxyInfo) + if err != nil { + // Log error but continue processing + log.Printf("Error sending message to server: %v", err) + // We could format an error message here, but since we're operating at the raw string level, + // we'll return a generic error JSON + errMsg := fmt.Sprintf(`{"type":"error","content":"Failed to send to server: %v"}`, err) + fmt.Fprintln(os.Stdout, errMsg) + continue + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading from stdin: %w", err) + } + + return nil +} + +// getFromServer sends a GET request to the server and streams the response to stdout +func getFromServer(proxyInfo ProxyInfo, ready *bool, readyCond *sync.Cond) error { + // Create HTTP request + req, err := http.NewRequest("GET", proxyInfo.Url, nil) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + req.Header.Set("User-Agent", "mcp-bridge-client") + req.Header.Set("Accept", "application/json") + + // Set basic authentication if bearer token or user is provided + if proxyInfo.BearerToken != "" { + req.Header.Set("Authorization", "Bearer "+proxyInfo.BearerToken) + } else if 
proxyInfo.User != "" { + req.SetBasicAuth(proxyInfo.User, proxyInfo.Password) + } + + // Send request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %w", err) + } + defer resp.Body.Close() + + // Check response status + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("server returned error: %s (status %d)", resp.Status, resp.StatusCode) + } + + // We're now ready to receive messages + readyCond.L.Lock() + *ready = true + readyCond.Broadcast() + readyCond.L.Unlock() + + // Stream response body to stdout + if _, err := io.Copy(os.Stdout, resp.Body); err != nil { + return fmt.Errorf("error streaming response to stdout: %w", err) + } + + return nil +} + +// SendToServer sends a raw message to the server and returns the raw response +func sendToServer(message string, proxyInfo ProxyInfo) error { + // Create HTTP request with raw message + req, err := http.NewRequest("POST", proxyInfo.Url, bytes.NewBufferString(message)) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", "mcp-bridge-client") + req.Header.Set("Accept", "application/json, text/event-stream") + + // Set basic authentication if bearer token or user is provided + if proxyInfo.BearerToken != "" { + req.Header.Set("Authorization", "Bearer "+proxyInfo.BearerToken) + } else if proxyInfo.User != "" { + req.SetBasicAuth(proxyInfo.User, proxyInfo.Password) + } + + // If requesting a specific instance, set the header + if proxyInfo.Instance != "" { + req.Header.Set("Fly-Force-Instance-Id", proxyInfo.Instance) + } + + // Send request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %w", err) + } + defer resp.Body.Close() + + // Check response status + if resp.StatusCode != http.StatusAccepted { + // Read response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error reading response: %w", err) + } + + return fmt.Errorf("server returned error: %s (status %d)", body, resp.StatusCode) + } + + // Request was accepted + return nil +} diff --git a/internal/command/mcp/proxy/relay.go b/internal/command/mcp/proxy/relay.go new file mode 100644 index 0000000000..1ef158632b --- /dev/null +++ b/internal/command/mcp/proxy/relay.go @@ -0,0 +1,273 @@ +package mcpProxy + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/superfly/flyctl/internal/buildinfo" +) + +func Replay(ctx context.Context, proxyInfo ProxyInfo) error { + // Create a new MCP client based on the provided configuration + mcpClient, err := newMCPClient(proxyInfo) + if err != nil { + return fmt.Errorf("error creating MCP client: %w", err) + } + defer mcpClient.Close() + + // Create a new MCP server + mcpServer := server.NewMCPServer( + "FlyMCP Proxy 🚀", + buildinfo.Info().Version.String(), + ) + + // Add the MCP client to the server + err = addToMCPServer(ctx, mcpClient, mcpServer) + if err != nil { + return fmt.Errorf("error adding MCP client to server: %w", err) + } + + if proxyInfo.Ping { + go startPingTask(ctx, mcpClient) + } + + // Start the stdio server + if err := server.ServeStdio(mcpServer); err != nil { + return fmt.Errorf("error starting stdio server: %w", err) + } + + return nil +} + +func 
newMCPClient(proxyInfo ProxyInfo) (*client.Client, error) { + headers := make(map[string]string) + + if proxyInfo.BearerToken != "" { + headers["Authorization"] = "Bearer " + proxyInfo.BearerToken + } else if proxyInfo.User != "" { + auth := proxyInfo.User + ":" + proxyInfo.Password + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + } + + if proxyInfo.Instance != "" { + headers["Fly-Force-Instance-Id"] = proxyInfo.Instance + } + + var err error + var mcpClient *client.Client + + if proxyInfo.Mode == "sse" { + var options []transport.ClientOption + + if len(headers) > 0 { + options = append(options, client.WithHeaders(headers)) + } + + mcpClient, err = client.NewSSEMCPClient(proxyInfo.Url, options...) + } else { + var options []transport.StreamableHTTPCOption + + if len(headers) > 0 { + options = append(options, transport.WithHTTPHeaders(headers)) + } + + if proxyInfo.Timeout > 0 { + options = append(options, transport.WithHTTPTimeout(time.Duration(proxyInfo.Timeout)*time.Second)) + } + + mcpClient, err = client.NewStreamableHttpClient(proxyInfo.Url, options...) + } + + if err != nil { + return nil, err + } + + return mcpClient, nil +} + +func addToMCPServer(ctx context.Context, mcpClient *client.Client, mcpServer *server.MCPServer) error { + err := mcpClient.Start(ctx) + if err != nil { + return err + } + + initRequest := mcp.InitializeRequest{} + initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION + initRequest.Params.ClientInfo = mcp.Implementation{ + Name: "FlyMCP Proxy Client", + Version: buildinfo.Info().Version.String(), + } + initRequest.Params.Capabilities = mcp.ClientCapabilities{ + Experimental: make(map[string]interface{}), + Roots: nil, + Sampling: nil, + } + + _, err = mcpClient.Initialize(ctx, initRequest) + + if err != nil { + return err + } + + err = addToolsToServer(ctx, mcpClient, mcpServer) + if err != nil { + return err + } + + _ = addPromptsToServer(ctx, mcpClient, mcpServer) + _ = addResourcesToServer(ctx, mcpClient, mcpServer) + _ = addResourceTemplatesToServer(ctx, mcpClient, mcpServer) + _ = addNotificationsToServer(ctx, mcpClient, mcpServer) + + return nil +} + +func startPingTask(ctx context.Context, mcpClient *client.Client) { + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() +PingLoop: + for { + select { + case <-ctx.Done(): + break PingLoop + case <-ticker.C: + _ = mcpClient.Ping(ctx) + } + } +} + +func addToolsToServer(ctx context.Context, mcpClient *client.Client, mcpServer *server.MCPServer) error { + toolsRequest := mcp.ListToolsRequest{} + + for { + tools, err := mcpClient.ListTools(ctx, toolsRequest) + + if err != nil { + return err + } + + if len(tools.Tools) == 0 { + break + } + + for _, tool := range tools.Tools { + mcpServer.AddTool(tool, mcpClient.CallTool) + } + + if tools.NextCursor == "" { + break + } + + toolsRequest.Params.Cursor = tools.NextCursor + } + + return nil +} + +func addPromptsToServer(ctx context.Context, mcpClient *client.Client, mcpServer *server.MCPServer) error { + promptsRequest := mcp.ListPromptsRequest{} + for { + prompts, err := mcpClient.ListPrompts(ctx, promptsRequest) + + if err != nil { + return err + } + + if len(prompts.Prompts) == 0 { + break + } + + for _, prompt := range prompts.Prompts { + mcpServer.AddPrompt(prompt, mcpClient.GetPrompt) + } + + if prompts.NextCursor == "" { + break + } + + promptsRequest.Params.Cursor = prompts.NextCursor + } + return nil +} + +func addResourcesToServer(ctx context.Context, mcpClient *client.Client, mcpServer 
*server.MCPServer) error { + resourcesRequest := mcp.ListResourcesRequest{} + + for { + resources, err := mcpClient.ListResources(ctx, resourcesRequest) + + if err != nil { + return err + } + + if len(resources.Resources) == 0 { + break + } + + for _, resource := range resources.Resources { + mcpServer.AddResource(resource, func(ctx context.Context, request mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) { + readResource, e := mcpClient.ReadResource(ctx, request) + if e != nil { + return nil, e + } + return readResource.Contents, nil + }) + } + + if resources.NextCursor == "" { + break + } + + resourcesRequest.Params.Cursor = resources.NextCursor + } + + return nil +} + +func addResourceTemplatesToServer(ctx context.Context, mcpClient *client.Client, mcpServer *server.MCPServer) error { + resourceTemplatesRequest := mcp.ListResourceTemplatesRequest{} + + for { + resourceTemplates, err := mcpClient.ListResourceTemplates(ctx, resourceTemplatesRequest) + + if err != nil { + return err + } + + if len(resourceTemplates.ResourceTemplates) == 0 { + break + } + + for _, resourceTemplate := range resourceTemplates.ResourceTemplates { + mcpServer.AddResourceTemplate(resourceTemplate, func(ctx context.Context, request mcp.ReadResourceRequest) ([]mcp.ResourceContents, error) { + readResource, e := mcpClient.ReadResource(ctx, request) + if e != nil { + return nil, e + } + return readResource.Contents, nil + }) + } + + if resourceTemplates.NextCursor == "" { + break + } + + resourceTemplatesRequest.Params.Cursor = resourceTemplates.NextCursor + } + + return nil +} + +func addNotificationsToServer(ctx context.Context, mcpClient *client.Client, mcpServer *server.MCPServer) error { + mcpClient.OnNotification(func(notification mcp.JSONRPCNotification) { + mcpServer.SendNotificationToAllClients(notification.Notification.Method, notification.Notification.Params.AdditionalFields) + }) + + return nil +} diff --git a/internal/command/mcp/proxy/types.go b/internal/command/mcp/proxy/types.go new file mode 100644 index 0000000000..767f0a38d8 --- /dev/null +++ b/internal/command/mcp/proxy/types.go @@ -0,0 +1,12 @@ +package mcpProxy + +type ProxyInfo struct { + Url string + BearerToken string + User string + Password string + Instance string + Mode string // "passthru" or "sse" or "stream" + Timeout int // Timeout in seconds for the request + Ping bool +} diff --git a/internal/command/mcp/server.go b/internal/command/mcp/server.go new file mode 100644 index 0000000000..d3324c3961 --- /dev/null +++ b/internal/command/mcp/server.go @@ -0,0 +1,494 @@ +package mcp + +import ( + "context" + "fmt" + "net/http" + "os" + "os/exec" + "os/signal" + "slices" + "strconv" + "strings" + "syscall" + + mcpGo "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/buildinfo" + "github.com/superfly/flyctl/internal/command" + mcpServer "github.com/superfly/flyctl/internal/command/mcp/server" + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/flagnames" +) + +var COMMANDS = slices.Concat( + mcpServer.AppCommands, + mcpServer.CertsCommands, + mcpServer.IPCommands, + mcpServer.LogCommands, + mcpServer.MachineCommands, + mcpServer.OrgCommands, + mcpServer.PlatformCommands, + mcpServer.SecretsCommands, + mcpServer.StatusCommands, + mcpServer.VolumeCommands, +) + +type contextKey string + +const authTokenKey contextKey = "authToken" + +func newServer() *cobra.Command 
{ + const ( + short = "[experimental] Start a flyctl MCP server" + long = short + "\n" + usage = "server" + ) + + cmd := command.New(usage, short, long, runServer) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.Bool{ + Name: "inspector", + Description: "Launch MCP inspector: a developer tool for testing and debugging MCP servers", + Default: false, + Shorthand: "i", + }, + flag.String{ + Name: "server", + Description: "Name to use for the MCP server in the MCP client configuration", + }, + flag.StringArray{ + Name: "config", + Description: "Path to the MCP client configuration file (can be specified multiple times)", + }, + flag.Bool{ + Name: "stream", + Description: "Enable HTTP streaming output for MCP commands", + }, + flag.Bool{ + Name: "sse", + Description: "Enable Server-Sent Events (SSE) for MCP commands", + }, + flag.Int{ + Name: "port", + Description: "Port to run the MCP server on (default is 8080)", + Default: 8080, + }, + flag.String{ + Name: flagnames.BindAddr, + Shorthand: "b", + Default: "127.0.0.1", + Description: "Local address to bind to", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Add flyctl MCP server to the " + name + " client configuration", + }, + ) + } + + return cmd +} + +func runServer(ctx context.Context) error { + flyctl, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to find executable: %w", err) + } + + configs, err := ListConfigPaths(ctx, true) + if err != nil { + return fmt.Errorf("failed to list MCP client configuration paths: %w", err) + } + + stream := flag.GetBool(ctx, "stream") + sse := flag.GetBool(ctx, "sse") + + if flag.GetBool(ctx, "inspector") || len(configs) > 0 { + server := flag.GetString(ctx, "server") + if server == "" { + server = "flyctl" + } + + args := []string{"mcp", "server"} + + if stream || sse { + args = []string{ + "mcp", + "proxy", + "--url", + fmt.Sprintf("http://localhost:%d", flag.GetInt(ctx, "port")), + } + + if token := getAccessToken(ctx); token != "" { + args = append(args, "--bearer-token", token) + } + + if stream { + args = append(args, "--stream") + } else { + args = append(args, "--sse") + } + } + + if len(configs) > 0 { + for _, config := range configs { + UpdateConfig(ctx, config.Path, config.ConfigName, server, flyctl, args) + } + } + + if flag.GetBool(ctx, "inspector") { + var process *os.Process + + // If sse or stream, start flyctl mcp server in the background + if stream || sse { + args := []string{"mcp", "server", "--port", strconv.Itoa(flag.GetInt(ctx, "port"))} + + if token := getAccessToken(ctx); token != "" { + args = append(args, "--access-token", token) + } + + if stream { + args = append(args, "--stream") + } else { + args = append(args, "--sse") + } + + cmd := exec.Command(flyctl, args...) + cmd.Env = os.Environ() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start flyctl mcp server in background: %w", err) + } + + process = cmd.Process + } + + // Launch MCP inspector + args = append([]string{"@modelcontextprotocol/inspector@latest", flyctl}, args...) + cmd := exec.Command("npx", args...) 
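+ // Run the inspector against this flyctl binary: either 'mcp server' over stdio,
+ // or 'mcp proxy' pointed at the background HTTP server started above.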
+ cmd.Env = os.Environ() + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to launch MCP inspector: %w", err) + } + + if process != nil { + // Attempt to kill the background process after inspector exits + if err := process.Kill(); err != nil { + fmt.Fprintf(os.Stderr, "failed to kill background flyctl mcp server: %v\n", err) + } + } + } + + return nil + } + + // Create MCP server + srv := server.NewMCPServer( + "FlyMCP 🚀", + buildinfo.Info().Version.String(), + ) + + // Register commands + for _, cmd := range COMMANDS { + // Create a tool function for each command + tool := func(ctx context.Context, request mcpGo.CallToolRequest) (*mcpGo.CallToolResult, error) { + // Extract arguments from the request + args := make(map[string]string) + argMap, ok := request.Params.Arguments.(map[string]any) + if !ok { + return nil, fmt.Errorf("invalid arguments: expected map[string]any") + } + for argName, argValue := range argMap { + description, ok := cmd.ToolArgs[argName] + if !ok { + return nil, fmt.Errorf("unknown argument %s", argName) + } + + if description.Required && argValue == nil { + return nil, fmt.Errorf("argument %s is required", argName) + } + + switch description.Type { + case "string": + if strValue, ok := argValue.(string); ok { + args[argName] = strValue + } else { + return nil, fmt.Errorf("argument %s must be a string", argName) + } + case "enum": + if strValue, ok := argValue.(string); ok { + if !slices.Contains(description.Enum, strValue) { + return nil, fmt.Errorf("argument %s must be one of %v", argName, description.Enum) + } + args[argName] = strValue + } else { + return nil, fmt.Errorf("argument %s must be a string", argName) + } + case "array": + if arrValue, ok := argValue.([]any); ok { + if len(arrValue) > 0 { + strArr := make([]string, len(arrValue)) + for i, v := range arrValue { + if str, ok := v.(string); ok { + strArr[i] = str + } else { + return nil, fmt.Errorf("argument %s must be an array of strings", argName) + } + } + args[argName] = strings.Join(strArr, ",") + } + } else { + return nil, fmt.Errorf("argument %s must be an array of strings", argName) + } + case "hash": + if arrValue, ok := argValue.([]any); ok { + if len(arrValue) > 0 { + strArr := make([]string, len(arrValue)) + for i, v := range arrValue { + if str, ok := v.(string); ok { + // Simple shell escaping: wrap value in single quotes and escape any single quotes inside + str = "'" + strings.ReplaceAll(str, "'", "'\\''") + "'" + strArr[i] = str + } else { + return nil, fmt.Errorf("argument %s must be an array of strings", argName) + } + } + args[argName] = strings.Join(strArr, " ") + } + } else { + return nil, fmt.Errorf("argument %s must be an array of strings", argName) + } + case "number": + if numValue, ok := argValue.(float64); ok { + args[argName] = strconv.FormatFloat(numValue, 'f', -1, 64) + } else { + return nil, fmt.Errorf("argument %s must be a number", argName) + } + case "boolean": + if boolValue, ok := argValue.(bool); ok { + args[argName] = strconv.FormatBool(boolValue) + } else { + return nil, fmt.Errorf("argument %s must be a boolean", argName) + } + default: + return nil, fmt.Errorf("unsupported argument type %s for argument %s", description.Type, argName) + } + } + + // Check for required arguments and fill in defaults + for argName, description := range cmd.ToolArgs { + if description.Required { + if _, ok := args[argName]; !ok { + return nil, fmt.Errorf("missing required argument %s", argName) + } + 
} else if description.Default != "" { + if _, ok := args[argName]; !ok { + args[argName] = description.Default + } + } + } + + // Call the builder function to get the command arguments + cmdArgs, err := cmd.Builder(args) + if err != nil { + return nil, fmt.Errorf("failed to build command: %w", err) + } + + // Log the command (without the auth token and any secret values) + if len(cmdArgs) >= 2 && cmdArgs[0] == "secrets" && cmdArgs[1] == "set" { + redactedCmdArgs := append([]string(nil), cmdArgs...) + for i, arg := range redactedCmdArgs[2:] { + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + redactedCmdArgs[i+2] = parts[0] + "=REDACTED" + } + } + fmt.Fprintf(os.Stderr, "Executing flyctl command: %v\n", redactedCmdArgs) + } else { + fmt.Fprintf(os.Stderr, "Executing flyctl command: %v\n", cmdArgs) + } + + // If auth token is present in context, add --access-token flag + if token, ok := ctx.Value(authTokenKey).(string); ok && token != "" { + cmdArgs = append(cmdArgs, "--access-token", token) + } + + // Execute the command + execCmd := exec.Command(flyctl, cmdArgs...) + output, err := execCmd.CombinedOutput() + if err != nil { + fmt.Fprintf(os.Stderr, "Error executing flyctl: %v\nOutput: %s\n", err, string(output)) + return nil, fmt.Errorf("failed to execute command: %v\nOutput: %s", err, string(output)) + } + + // Return the output as a tool result + return mcpGo.NewToolResultText(string(output)), nil + } + + // Register the tool with the server + toolOptions := []mcpGo.ToolOption{ + mcpGo.WithDescription(cmd.ToolDescription), + } + + for argName, arg := range cmd.ToolArgs { + options := []mcpGo.PropertyOption{ + mcpGo.Description(arg.Description), + } + + if arg.Required { + options = append(options, mcpGo.Required()) + } + + switch arg.Type { + case "string": + if arg.Default != "" { + options = append(options, mcpGo.DefaultString(arg.Default)) + } + + toolOptions = append(toolOptions, mcpGo.WithString(argName, options...)) + + case "enum": + if arg.Default != "" { + if slices.Contains(arg.Enum, arg.Default) { + options = append(options, mcpGo.DefaultString(arg.Default)) + } else { + return fmt.Errorf("invalid default value for argument %s: %s is not in enum %v", argName, arg.Default, arg.Enum) + } + } + + if len(arg.Enum) > 0 { + options = append(options, mcpGo.Enum(arg.Enum...)) + } else { + return fmt.Errorf("enum argument %s must have at least one value", argName) + } + + toolOptions = append(toolOptions, mcpGo.WithString(argName, options...)) + + case "array": + schema := map[string]any{"type": "string"} + options = append(options, mcpGo.Items(schema)) + + toolOptions = append(toolOptions, mcpGo.WithArray(argName, options...)) + + case "number": + if arg.Default != "" { + if defaultValue, err := strconv.ParseFloat(arg.Default, 64); err == nil { + options = append(options, mcpGo.DefaultNumber(defaultValue)) + } else { + return fmt.Errorf("invalid default value for argument %s: %v", argName, err) + } + } + + toolOptions = append(toolOptions, mcpGo.WithNumber(argName, options...)) + + case "boolean": + if arg.Default != "" { + if defaultValue, err := strconv.ParseBool(arg.Default); err == nil { + options = append(options, mcpGo.DefaultBool(defaultValue)) + } else { + return fmt.Errorf("invalid default value for argument %s: %v", argName, err) + } + } + + toolOptions = append(toolOptions, mcpGo.WithBoolean(argName, options...)) + + case "hash": + schema := map[string]any{"type": "string"} + options = append(options, mcpGo.Items(schema)) + + toolOptions = 
append(toolOptions, mcpGo.WithArray(argName, options...)) + + default: + return fmt.Errorf("unsupported argument type %s for argument %s", arg.Type, argName) + } + } + + srv.AddTool( + mcpGo.NewTool(cmd.ToolName, toolOptions...), + tool, + ) + } + + if defaultToken := getAccessToken(ctx); defaultToken != "" { + ctx = context.WithValue(ctx, authTokenKey, defaultToken) + } + + if stream || sse { + port := flag.GetInt(ctx, "port") + var start func(string) error + var err error + + // enable graceful shutdown on signals + sigc := make(chan os.Signal, 1) + signal.Notify(sigc, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + go func() { + <-sigc + os.Exit(0) + }() + + // Function to extract the auth token from the request context + extractAuthToken := func(ctx context.Context, r *http.Request) context.Context { + authHeader := r.Header.Get("Authorization") + if authHeader != "" { + // Extract the token from the Authorization header + token := strings.TrimPrefix(authHeader, "Bearer ") + if token != authHeader { // Ensure it was a Bearer token + return context.WithValue(ctx, authTokenKey, token) + } + } + + return ctx + } + + if stream { + fmt.Fprintf(os.Stderr, "Starting flyctl MCP server in streaming mode on port %d...\n", port) + httpServer := server.NewStreamableHTTPServer(srv) + server.WithHTTPContextFunc(extractAuthToken)(httpServer) + start = httpServer.Start + } else { + fmt.Fprintf(os.Stderr, "Starting flyctl MCP server in SSE mode on port %d...\n", port) + sseServer := server.NewSSEServer(srv) + server.WithSSEContextFunc(extractAuthToken)(sseServer) + start = sseServer.Start + } + + if err = start(fmt.Sprintf("%s:%d", flag.GetString(ctx, flagnames.BindAddr), port)); err != nil { + return fmt.Errorf("Server error: %v", err) + } + } else { + fmt.Fprintf(os.Stderr, "Starting flyctl MCP server...\n") + if err := server.ServeStdio(srv); err != nil { + return fmt.Errorf("Server error: %v", err) + } + } + + return nil +} + +func getAccessToken(ctx context.Context) string { + token := flag.GetString(ctx, flagnames.AccessToken) + + if token == "" { + cfg := config.FromContext(ctx) + token = cfg.Tokens.GraphQL() + } + + return token +} diff --git a/internal/command/mcp/server/apps.go b/internal/command/mcp/server/apps.go new file mode 100644 index 0000000000..5f896efa71 --- /dev/null +++ b/internal/command/mcp/server/apps.go @@ -0,0 +1,220 @@ +package mcpServer + +import ( + "fmt" + "strconv" +) + +var AppCommands = []FlyCommand{ + { + ToolName: "fly-apps-create", + ToolDescription: "Create a new Fly.io app. If you don't specify a name, one will be generated for you.", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "org": { + Description: "Slug of the organization to create the app in", + Required: true, + Type: "string", + }, + "network": { + Description: "Custom network id", + Required: false, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "create"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + cmdArgs = append(cmdArgs, "--generate-name") + } + + if org, ok := args["org"]; ok { + cmdArgs = append(cmdArgs, "--org", org) + } + + if network, ok := args["network"]; ok { + cmdArgs = append(cmdArgs, "--network", network) + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-apps-destroy", + ToolDescription: "Destroy a Fly.io app. 
All machines and volumes will be destroyed.", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "destroy"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + return nil, fmt.Errorf("missing required argument: name") + } + + cmdArgs = append(cmdArgs, "--yes") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-apps-list", + ToolDescription: "List all Fly.io apps in the organization", + ToolArgs: map[string]FlyArg{ + "org": { + Description: "Slug of the organization to list apps for", + Required: false, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "list"} + + if org, ok := args["org"]; ok { + cmdArgs = append(cmdArgs, "--org", org) + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-apps-move", + ToolDescription: "Move a Fly.io app to a different organization", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "org": { + Description: "Slug of the organization to move the app to", + Required: true, + Type: "string", + }, + "skip-health-checks": { + Description: "Skip health checks during the move", + Required: false, + Type: "boolean", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "move"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + return nil, fmt.Errorf("missing required argument: name") + } + + if org, ok := args["org"]; ok { + cmdArgs = append(cmdArgs, "--org", org) + } else { + return nil, fmt.Errorf("missing required argument: org") + } + + if skipHealthChecks, ok := args["skip-health-checks"]; ok { + if value, err := strconv.ParseBool(skipHealthChecks); err == nil && value { + cmdArgs = append(cmdArgs, "--skip-health-checks") + } + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-apps-releases", + ToolDescription: "List all releases for a Fly.io app, including type, when, success/fail and which user triggered the release.", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "releases"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, "--app", name) + } else { + return nil, fmt.Errorf("missing required argument: name") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-apps-restart", + ToolDescription: "Restart a Fly.io app. 
Perform a rolling restart against all running Machines.", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "force-stop": { + Description: "Force stop the app before restarting", + Required: false, + Type: "boolean", + }, + "skip-health-checks": { + Description: "Skip health checks during the restart", + Required: false, + Type: "boolean", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"apps", "restart"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + return nil, fmt.Errorf("missing required argument: name") + } + + if forceStop, ok := args["force-stop"]; ok { + if value, err := strconv.ParseBool(forceStop); err == nil && value { + cmdArgs = append(cmdArgs, "--force-stop") + } + } + + if skipHealthChecks, ok := args["skip-health-checks"]; ok { + if value, err := strconv.ParseBool(skipHealthChecks); err == nil && value { + cmdArgs = append(cmdArgs, "--skip-health-checks") + } + } + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/certs.go b/internal/command/mcp/server/certs.go new file mode 100644 index 0000000000..474dbf6d93 --- /dev/null +++ b/internal/command/mcp/server/certs.go @@ -0,0 +1,162 @@ +package mcpServer + +import ( + "fmt" +) + +var CertsCommands = []FlyCommand{ + { + ToolName: "fly-certs-add", + ToolDescription: "Add an SSL/TLS certificate for a Fly.io app.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "hostname": { + Description: "The hostname to add a certificate for (e.g. www.example.com)", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"certs", "add", "--json"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + hostname, ok := args["hostname"] + if !ok || hostname == "" { + return nil, fmt.Errorf("missing required argument: hostname") + } + cmdArgs = append(cmdArgs, hostname) + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-certs-check", + ToolDescription: "Check the status of an SSL/TLS certificate for a Fly.io app.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "hostname": { + Description: "The hostname to check the certificate for (e.g. 
www.example.com)", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"certs", "check", "--json"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + hostname, ok := args["hostname"] + if !ok || hostname == "" { + return nil, fmt.Errorf("missing required argument: hostname") + } + cmdArgs = append(cmdArgs, hostname) + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-certs-list", + ToolDescription: "List all SSL/TLS certificates for a Fly.io app.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"certs", "list", "--json"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-certs-remove", + ToolDescription: "Remove an SSL/TLS certificate for a Fly.io app.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "hostname": { + Description: "The hostname to remove the certificate for (e.g. www.example.com)", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"certs", "remove", "--json"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + hostname, ok := args["hostname"] + if !ok || hostname == "" { + return nil, fmt.Errorf("missing required argument: hostname") + } + cmdArgs = append(cmdArgs, hostname) + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-certs-show", + ToolDescription: "Show details for an SSL/TLS certificate for a Fly.io app.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "hostname": { + Description: "The hostname to show the certificate for (e.g. www.example.com)", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"certs", "show", "--json"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + hostname, ok := args["hostname"] + if !ok || hostname == "" { + return nil, fmt.Errorf("missing required argument: hostname") + } + cmdArgs = append(cmdArgs, hostname) + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/ips.go b/internal/command/mcp/server/ips.go new file mode 100644 index 0000000000..f5a4e04150 --- /dev/null +++ b/internal/command/mcp/server/ips.go @@ -0,0 +1,213 @@ +package mcpServer + +import ( + "fmt" + "strconv" + "strings" +) + +var IPCommands = []FlyCommand{ + { + ToolName: "fly-ips-allocate-v4", + ToolDescription: "Allocate an IPv4 address to the application. 
Dedicated IPv4 addresses cost $2/mo.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "shared": { + Description: "Allocate a shared IPv4 address instead of dedicated", + Required: false, + Type: "boolean", + Default: "false", + }, + "region": { + Description: "Region to allocate the IP address in", + Required: false, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"ips", "allocate-v4"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "--app", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + if shared, ok := args["shared"]; ok { + if value, err := strconv.ParseBool(shared); err == nil && value { + cmdArgs = append(cmdArgs, "--shared") + } + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + // Always use --yes to avoid interactive prompts + cmdArgs = append(cmdArgs, "--yes") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-ips-allocate-v6", + ToolDescription: "Allocate an IPv6 address to the application", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "private": { + Description: "Allocate a private IPv6 address", + Required: false, + Type: "boolean", + Default: "false", + }, + "region": { + Description: "Region to allocate the IP address in", + Required: false, + Type: "string", + }, + "org": { + Description: "Organization slug (required for private IPv6)", + Required: false, + Type: "string", + }, + "network": { + Description: "Target network name for a Flycast private IPv6 address", + Required: false, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"ips", "allocate-v6"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "--app", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + if private, ok := args["private"]; ok { + if value, err := strconv.ParseBool(private); err == nil && value { + cmdArgs = append(cmdArgs, "--private") + } + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + if org, ok := args["org"]; ok { + cmdArgs = append(cmdArgs, "--org", org) + } + + if network, ok := args["network"]; ok { + cmdArgs = append(cmdArgs, "--network", network) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-ips-list", + ToolDescription: "List all IP addresses allocated to the application", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"ips", "list"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "--app", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-ips-release", + ToolDescription: "Release one or more IP addresses from the application", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "addresses": { + Description: "IP addresses to release", + Required: true, + Type: "array", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"ips", "release"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, 
"--app", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + if addresses, ok := args["addresses"]; ok { + // Split comma-separated addresses and add each as a separate argument + for _, addr := range strings.Split(addresses, ",") { + addr = strings.TrimSpace(addr) + if addr != "" { + cmdArgs = append(cmdArgs, addr) + } + } + } else { + return nil, fmt.Errorf("missing required argument: addresses") + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-ips-private", + ToolDescription: "List instances' private IP addresses, accessible from within the Fly network", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"ips", "private"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "--app", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/logs.go b/internal/command/mcp/server/logs.go new file mode 100644 index 0000000000..2d7b3b23c0 --- /dev/null +++ b/internal/command/mcp/server/logs.go @@ -0,0 +1,42 @@ +package mcpServer + +var LogCommands = []FlyCommand{ + { + ToolName: "fly-logs", + ToolDescription: "Get logs for a Fly.io app or specific machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "machine": { + Description: "Specific machine ID", + Required: false, + Type: "string", + }, + "region": { + Description: "Region to get logs from", + Required: false, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"logs", "--no-tail"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if machine, ok := args["machine"]; ok { + cmdArgs = append(cmdArgs, "--machine", machine) + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/machine.go b/internal/command/mcp/server/machine.go new file mode 100644 index 0000000000..f1c1b9736e --- /dev/null +++ b/internal/command/mcp/server/machine.go @@ -0,0 +1,1669 @@ +package mcpServer + +import ( + "fmt" + "strconv" +) + +var MachineCommands = []FlyCommand{ + { + ToolName: "fly-machine-clone", + ToolDescription: "Clone a Fly Machine. The new Machine will be a copy of the specified Machine. If the original Machine has a volume, then a new empty volume will be created and attached to the new Machine.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to clone", + Required: true, + Type: "string", + }, + "attach-volume": { + Description: "Attach a volume to the new machine", + Required: false, + Type: "string", + }, + "clear-auto-destroy": { + Description: "Disable auto destroy setting on the new Machine", + Required: false, + Type: "boolean", + }, + "clear-cmd": { + Description: "Set empty CMD on the new Machine so it uses default CMD for the image", + Required: false, + Type: "boolean", + }, + "from-snapshot": { + Description: "Clone attached volumes and restore from snapshot, use 'last' for most recent snapshot. 
The default is an empty volume.", + Required: false, + Type: "string", + }, + "host-dedication-id": { + Description: "The dedication id of the reserved hosts for your organization (if any)", + Required: false, + Type: "string", + }, + "name": { + Description: "Optional name of the new machine", + Required: false, + Type: "string", + }, + "override-cmd": { + Description: "Set CMD on the new Machine to this value", + Required: false, + Type: "string", + }, + "region": { + Description: "Region to create the new machine in", + Required: false, + Type: "string", + }, + "standby-for": { + Description: "Standby for a machine in the same region", + Required: false, + Type: "array", + }, + "vm-cpu-kind": { + Description: "The CPU kind to use for the new machine", + Required: false, + Type: "enum", + Enum: []string{"shared", "dedicated"}, + }, + "vm-cpus": { + Description: "The number of CPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-gpu-kind": { + Description: "If set, the GPU model to attach", + Required: false, + Type: "enum", + Enum: []string{"a100-pcie-40gb", "a100-sxm4-80gb", "l40s", "a10", "none"}, + }, + "vm-gpus": { + Description: "The number of GPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-memory": { + Description: "The amount of memory (in megabytes) to use for the new machine", + Required: false, + Type: "number", + }, + "vm-size": { + Description: `The VM size to set machines to. See "fly platform vm-sizes" for valid values`, + Required: false, + Type: "string", + }, + "volume-requires-unique-zone": { + Description: "Require volume to be placed in separate hardware zone from existing volumes.", + Required: false, + Type: "boolean", + Default: "true", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "clone"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if attachVolume, ok := args["attach-volume"]; ok { + cmdArgs = append(cmdArgs, "--attach-volume", attachVolume) + } + + if clearAutoDestroy, ok := args["clear-auto-destroy"]; ok { + value, err := strconv.ParseBool(clearAutoDestroy) + if err != nil { + return nil, fmt.Errorf("invalid value for clear-auto-destroy: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--clear-auto-destroy") + } + } + + if clearCmd, ok := args["clear-cmd"]; ok { + value, err := strconv.ParseBool(clearCmd) + if err != nil { + return nil, fmt.Errorf("invalid value for clear-cmd: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--clear-cmd") + } + } + + if fromSnapshot, ok := args["from-snapshot"]; ok { + cmdArgs = append(cmdArgs, "--from-snapshot", fromSnapshot) + } + + if hostDedicationID, ok := args["host-dedication-id"]; ok { + cmdArgs = append(cmdArgs, "--host-dedication-id", hostDedicationID) + } + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, "--name", name) + } + + if overrideCmd, ok := args["override-cmd"]; ok { + cmdArgs = append(cmdArgs, "--override-cmd", overrideCmd) + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + if standbyFor, ok := args["standby-for"]; ok { + cmdArgs = append(cmdArgs, "--standby-for", standbyFor) + } + + if vmCpuKind, ok := args["vm-cpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpu-kind", vmCpuKind) + } + + if vmCpus, ok := args["vm-cpus"]; ok 
{ + cmdArgs = append(cmdArgs, "--vm-cpus", vmCpus) + } + + if vmGpuKind, ok := args["vm-gpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpu-kind", vmGpuKind) + } + + if vmGpus, ok := args["vm-gpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpus", vmGpus) + } + + if vmMemory, ok := args["vm-memory"]; ok { + cmdArgs = append(cmdArgs, "--vm-memory", vmMemory) + } + + if vmSize, ok := args["vm-size"]; ok { + cmdArgs = append(cmdArgs, "--vm-size", vmSize) + } + + if volumeRequiresUniqueZone, ok := args["volume-requires-unique-zone"]; ok { + value, err := strconv.ParseBool(volumeRequiresUniqueZone) + if err != nil { + return nil, fmt.Errorf("invalid value for volume-requires-unique-zone: %v", err) + } else if !value { + cmdArgs = append(cmdArgs, "--volume-requires-unique-zone=false") + } + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-cordon", + ToolDescription: "Deactivate all services on a machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to cordon", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "cordon"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + cmdArgs = append(cmdArgs, "--verbose") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-create", + ToolDescription: "Create, but don’t start, a machine", + ToolArgs: map[string]FlyArg{ + // missing: build-depot, build-nixpacks, dockerfile, file-literal, file-local, file-secret, + // kernel-arg, machine-config, org + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "autostart": { + Description: "Automatically start a stopped Machine when a network request is received", + Required: false, + Type: "boolean", + Default: "true", + }, + "autostop": { + Description: "Automatically stop a Machine when there are no network requests for it", + Required: false, + Type: "enum", + Enum: []string{"off", "stop", "suspend"}, + Default: "off", + }, + "entrypoint": { + Description: "The command to override the Docker ENTRYPOINT", + Required: false, + Type: "string", + }, + "env": { + Description: "Set of environment variables in the form of NAME=VALUE pairs.", + Required: false, + Type: "array", + }, + "host-dedication-id": { + Description: "The dedication id of the reserved hosts for your organization (if any)", + Required: false, + Type: "string", + }, + "id": { + Description: "Machine ID, if previously known", + Required: false, + Type: "string", + }, + "image": { + Description: "The image to use for the new machine", + Required: true, + Type: "string", + }, + "metadata": { + Description: "Set of metadata in the form of NAME=VALUE pairs.", + Required: false, + Type: "array", + }, + "name": { + Description: "Name of the new machine. 
Will be generated if omitted.", + Required: false, + Type: "string", + }, + "port": { + Description: "The external ports and handlers for services, in the format: port[:machinePort][/protocol[:handler[:handler...]]])", + Required: false, + Type: "array", + }, + "region": { + Description: "Region to create the new machine in", + Required: false, + Type: "string", + }, + "restart": { + Description: "Restart policy for the new machine", + Required: false, + Type: "enum", + Enum: []string{"no", "always", "on-fail"}, + }, + "rm": { + Description: "Automatically remove the Machine when it exits", + Required: false, + Type: "boolean", + }, + "schedule": { + Description: "Schedule for the new machine", + Required: false, + Type: "enum", + Enum: []string{"hourly", "daily", "monthly"}, + }, + "skip-dns-registration": { + Description: "Skip DNS registration for the new machine", + Required: false, + Type: "boolean", + }, + "standby-for": { + Description: "For Machines without services, a comma separated list of Machine IDs to act as standby for.", + Required: false, + Type: "array", + }, + "use-zstd": { + Description: "Use zstd compression for the image", + Required: false, + Type: "boolean", + }, + "vm-cpu-kind": { + Description: "The CPU kind to use for the new machine", + Required: false, + Type: "enum", + Enum: []string{"shared", "dedicated"}, + }, + "vm-cpus": { + Description: "The number of CPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-gpu-kind": { + Description: "If set, the GPU model to attach", + Required: false, + Type: "enum", + Enum: []string{"a100-pcie-40gb", "a100-sxm4-80gb", "l40s", "a10", "none"}, + }, + "vm-gpus": { + Description: "The number of GPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-memory": { + Description: "The amount of memory (in megabytes) to use for the new machine", + Required: false, + Type: "number", + }, + "vm-size": { + Description: `The VM size to set machines to. 
See "fly platform vm-sizes" for valid values`, + Required: false, + Type: "string", + }, + "volume": { + Description: "Volume to mount, in the form of :/path/inside/machine[:]", + Required: false, + Type: "array", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "create"} + + if image, ok := args["image"]; ok { + cmdArgs = append(cmdArgs, image) + } else { + return nil, fmt.Errorf("missing required argument: image") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + if autostart, ok := args["autostart"]; ok { + value, err := strconv.ParseBool(autostart) + if err != nil { + return nil, fmt.Errorf("invalid value for autostart: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--autostart") + } + } + + if autostop, ok := args["autostop"]; ok { + cmdArgs = append(cmdArgs, "--autostop", autostop) + } + + if entrypoint, ok := args["entrypoint"]; ok { + cmdArgs = append(cmdArgs, "--entrypoint", entrypoint) + } + + if env, ok := args["env"]; ok { + cmdArgs = append(cmdArgs, "--env", env) + } + + if hostDedicationID, ok := args["host-dedication-id"]; ok { + cmdArgs = append(cmdArgs, "--host-dedication-id", hostDedicationID) + } + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, "--id", id) + } + + if metadata, ok := args["metadata"]; ok { + cmdArgs = append(cmdArgs, "--metadata", metadata) + } + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, "--name", name) + } + + if port, ok := args["port"]; ok { + cmdArgs = append(cmdArgs, "--port", port) + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + if restart, ok := args["restart"]; ok { + cmdArgs = append(cmdArgs, "--restart", restart) + } + + if rm, ok := args["rm"]; ok { + value, err := strconv.ParseBool(rm) + if err != nil { + return nil, fmt.Errorf("invalid value for rm: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--rm") + } + } + + if schedule, ok := args["schedule"]; ok { + cmdArgs = append(cmdArgs, "--schedule", schedule) + } + + if skipDnsRegistration, ok := args["skip-dns-registration"]; ok { + value, err := strconv.ParseBool(skipDnsRegistration) + if err != nil { + return nil, fmt.Errorf("invalid value for skip-dns-registration: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--skip-dns-registration") + } + } + + if standbyFor, ok := args["standby-for"]; ok { + cmdArgs = append(cmdArgs, "--standby-for", standbyFor) + } + + if useZstd, ok := args["use-zstd"]; ok { + value, err := strconv.ParseBool(useZstd) + if err != nil { + return nil, fmt.Errorf("invalid value for use-zstd: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--use-zstd") + } + } + + if vmCpuKind, ok := args["vm-cpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpu-kind", vmCpuKind) + } + + if vmCpus, ok := args["vm-cpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpus", vmCpus) + } + + if vmGpuKind, ok := args["vm-gpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpu-kind", vmGpuKind) + } + + if vmGpus, ok := args["vm-gpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpus", vmGpus) + } + + if vmMemory, ok := args["vm-memory"]; ok { + cmdArgs = append(cmdArgs, "--vm-memory", vmMemory) + } + + if vmSize, ok := args["vm-size"]; ok { + cmdArgs = append(cmdArgs, "--vm-size", vmSize) + } + + if volume, ok := args["volume"]; ok { + cmdArgs = append(cmdArgs, "--volume", volume) + } + + return cmdArgs, nil + 
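+ // Note (assumption based on the MCP server layer above): array-valued args such as
+ // env, metadata, port, standby-for and volume arrive here already joined into a single
+ // comma-separated string by the "array" case in server.go.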
}, + }, + + { + ToolName: "fly-machine-destroy", + ToolDescription: "Destroy one or more Fly machines. This command requires a machine to be in a stopped or suspended state unless the force flag is used.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to destroy", + Required: true, + Type: "string", + }, + "force": { + Description: "Force destroy the machine, even if it is running", + Required: false, + Type: "boolean", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "destroy"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if force, ok := args["force"]; ok { + value, err := strconv.ParseBool(force) + if err != nil { + return nil, fmt.Errorf("invalid value for force: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--force") + } + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-egress-ip-allocate", + ToolDescription: "Allocate a pair of static egress IPv4 and IPv6 for a machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to allocate egress IP for", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "egress-ip", "allocate"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + cmdArgs = append(cmdArgs, "--yes") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-egress-ip-list", + ToolDescription: "List all static egress IPv4 and IPv6 for a machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to list egress IP for", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "egress-ip", "list"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + cmdArgs = append(cmdArgs, "--verbose") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-egress-ip-release", + ToolDescription: "Release a pair of static egress IPv4 and IPv6 for a machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to release egress IP for", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "egress-ip", "release"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + cmdArgs = append(cmdArgs, "--yes") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-exec", + ToolDescription: "Run a command on a machine", + 
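+ // The command is forwarded to flyctl as one positional argument, for example
+ // (illustrative, not from the original): flyctl machine exec <machine-id> "uptime" -a <app>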
ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to run the command on", + Required: true, + Type: "string", + }, + "command": { + Description: "Command to run on the machine", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "exec"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if command, ok := args["command"]; ok { + cmdArgs = append(cmdArgs, command) + } else { + return nil, fmt.Errorf("missing required argument: command") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-leases-clear", + ToolDescription: "Clear the leases for a machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to clear leases for", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "leases", "clear"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-kill", + ToolDescription: "Kill (SIGKILL) a Fly machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to kill", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "kill"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-leases-view", + ToolDescription: "View machine leases", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to list leases for", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "leases", "view"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-list", + ToolDescription: "List all machines for a Fly app", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "list"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-restart", + ToolDescription: "Restart a Fly machine", + 
ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to restart", + Required: true, + Type: "string", + }, + "force": { + Description: "Force stop if it is running", + Required: false, + Type: "boolean", + }, + "signal": { + Description: "Signal to send to the machine", + Required: false, + Type: "string", + }, + "skip-health-checks": { + Description: "Skip health checks during the restart", + Required: false, + Type: "boolean", + }, + "time": { + Description: "Seconds to wait before killing the machine", + Required: false, + Type: "number", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "restart"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if force, ok := args["force"]; ok { + value, err := strconv.ParseBool(force) + if err != nil { + return nil, fmt.Errorf("invalid value for force: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--force") + } + } + + if signal, ok := args["signal"]; ok { + cmdArgs = append(cmdArgs, "--signal", signal) + } + + if skipHealthChecks, ok := args["skip-health-checks"]; ok { + value, err := strconv.ParseBool(skipHealthChecks) + if err != nil { + return nil, fmt.Errorf("invalid value for skip-health-checks: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--skip-health-checks") + } + } + + if timeStr, ok := args["time"]; ok { + cmdArgs = append(cmdArgs, "--time", timeStr) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-run", + ToolDescription: "Run a machine", + ToolArgs: map[string]FlyArg{ + // missing: build-depot, build-nixpacks, dockerfile, file-literal, file-local, file-secret, + // kernel-arg, machine-config, org, wg + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "autostart": { + Description: "Automatically start a stopped Machine when a network request is received", + Required: false, + Type: "boolean", + Default: "true", + }, + "autostop": { + Description: "Automatically stop a Machine when there are no network requests for it", + Required: false, + Type: "enum", + Enum: []string{"off", "stop", "suspend"}, + Default: "off", + }, + "command": { + Description: "Command to run on the machine", + Required: false, + Type: "string", + }, + "entrypoint": { + Description: "The command to override the Docker ENTRYPOINT", + Required: false, + Type: "string", + }, + "env": { + Description: "Set of environment variables in the form of NAME=VALUE pairs.", + Required: false, + Type: "array", + }, + "host-dedication-id": { + Description: "The dedication id of the reserved hosts for your organization (if any)", + Required: false, + Type: "string", + }, + "id": { + Description: "Machine ID, if previously known", + Required: false, + Type: "string", + }, + "image": { + Description: "The image to use for the new machine", + Required: true, + Type: "string", + }, + "metadata": { + Description: "Set of metadata in the form of NAME=VALUE pairs.", + Required: false, + Type: "array", + }, + "name": { + Description: "Name of the new machine. 
Will be generated if omitted.", + Required: false, + Type: "string", + }, + "port": { + Description: "The external ports and handlers for services, in the format: port[:machinePort][/protocol[:handler[:handler...]]])", + Required: false, + Type: "array", + }, + "region": { + Description: "Region to create the new machine in", + Required: false, + Type: "string", + }, + "restart": { + Description: "Restart policy for the new machine", + Required: false, + Type: "enum", + Enum: []string{"no", "always", "on-fail"}, + }, + "rm": { + Description: "Automatically remove the Machine when it exits", + Required: false, + Type: "boolean", + }, + "schedule": { + Description: "Schedule for the new machine", + Required: false, + Type: "enum", + Enum: []string{"hourly", "daily", "monthly"}, + }, + "skip-dns-registration": { + Description: "Skip DNS registration for the new machine", + Required: false, + Type: "boolean", + }, + "standby-for": { + Description: "For Machines without services, a comma separated list of Machine IDs to act as standby for.", + Required: false, + Type: "array", + }, + "use-zstd": { + Description: "Use zstd compression for the image", + Required: false, + Type: "boolean", + }, + "user": { + Description: "User to run the command as", + Required: false, + Type: "string", + }, + "vm-cpu-kind": { + Description: "The CPU kind to use for the new machine", + Required: false, + Type: "enum", + Enum: []string{"shared", "dedicated"}, + }, + "vm-cpus": { + Description: "The number of CPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-gpu-kind": { + Description: "If set, the GPU model to attach", + Required: false, + Type: "enum", + Enum: []string{"a100-pcie-40gb", "a100-sxm4-80gb", "l40s", "a10", "none"}, + }, + "vm-gpus": { + Description: "The number of GPUs to use for the new machine", + Required: false, + Type: "number", + }, + "vm-memory": { + Description: "The amount of memory (in megabytes) to use for the new machine", + Required: false, + Type: "number", + }, + "vm-size": { + Description: `The VM size to set machines to. 
See "fly platform vm-sizes" for valid values`, + Required: false, + Type: "string", + }, + "volume": { + Description: "Volume to mount, in the form of :/path/inside/machine[:]", + Required: false, + Type: "array", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "run"} + + if image, ok := args["image"]; ok { + cmdArgs = append(cmdArgs, image) + } else { + return nil, fmt.Errorf("missing required argument: image") + } + + if command, ok := args["command"]; ok { + cmdArgs = append(cmdArgs, command) + } else { + return nil, fmt.Errorf("missing required argument: command") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + if autostart, ok := args["autostart"]; ok { + value, err := strconv.ParseBool(autostart) + if err != nil { + return nil, fmt.Errorf("invalid value for autostart: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--autostart") + } + } + + if autostop, ok := args["autostop"]; ok { + cmdArgs = append(cmdArgs, "--autostop", autostop) + } + + if entrypoint, ok := args["entrypoint"]; ok { + cmdArgs = append(cmdArgs, "--entrypoint", entrypoint) + } + + if env, ok := args["env"]; ok { + cmdArgs = append(cmdArgs, "--env", env) + } + + if hostDedicationID, ok := args["host-dedication-id"]; ok { + cmdArgs = append(cmdArgs, "--host-dedication-id", hostDedicationID) + } + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, "--id", id) + } + + if metadata, ok := args["metadata"]; ok { + cmdArgs = append(cmdArgs, "--metadata", metadata) + } + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, "--name", name) + } + + if port, ok := args["port"]; ok { + cmdArgs = append(cmdArgs, "--port", port) + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + if restart, ok := args["restart"]; ok { + cmdArgs = append(cmdArgs, "--restart", restart) + } + + if rm, ok := args["rm"]; ok { + value, err := strconv.ParseBool(rm) + if err != nil { + return nil, fmt.Errorf("invalid value for rm: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--rm") + } + } + + if schedule, ok := args["schedule"]; ok { + cmdArgs = append(cmdArgs, "--schedule", schedule) + } + + if skipDnsRegistration, ok := args["skip-dns-registration"]; ok { + value, err := strconv.ParseBool(skipDnsRegistration) + if err != nil { + return nil, fmt.Errorf("invalid value for skip-dns-registration: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--skip-dns-registration") + } + } + + if standbyFor, ok := args["standby-for"]; ok { + cmdArgs = append(cmdArgs, "--standby-for", standbyFor) + } + + if useZstd, ok := args["use-zstd"]; ok { + value, err := strconv.ParseBool(useZstd) + if err != nil { + return nil, fmt.Errorf("invalid value for use-zstd: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--use-zstd") + } + } + + if user, ok := args["user"]; ok { + cmdArgs = append(cmdArgs, "--user", user) + } + + if vmCpuKind, ok := args["vm-cpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpu-kind", vmCpuKind) + } + + if vmCpus, ok := args["vm-cpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpus", vmCpus) + } + + if vmGpuKind, ok := args["vm-gpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpu-kind", vmGpuKind) + } + + if vmGpus, ok := args["vm-gpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpus", vmGpus) + } + + if vmMemory, ok := args["vm-memory"]; ok { + cmdArgs = append(cmdArgs, 
"--vm-memory", vmMemory) + } + + if vmSize, ok := args["vm-size"]; ok { + cmdArgs = append(cmdArgs, "--vm-size", vmSize) + } + + if volume, ok := args["volume"]; ok { + cmdArgs = append(cmdArgs, "--volume", volume) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-start", + ToolDescription: "Start a Fly machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to start", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "start"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-status", + ToolDescription: "Show current status of a running machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to show status for", + Required: true, + Type: "string", + }, + "display-config": { + Description: "Display the machine config", + Required: false, + Type: "boolean", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "status"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if displayConfig, ok := args["display-config"]; ok { + value, err := strconv.ParseBool(displayConfig) + if err != nil { + return nil, fmt.Errorf("invalid value for display-config: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--display-config") + } + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-stop", + ToolDescription: "Stop a Fly machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to stop", + Required: true, + Type: "string", + }, + "signal": { + Description: "Signal to send to the machine", + Required: false, + Type: "string", + }, + "timeout": { + Description: "Seconds to wait before killing the machine", + Required: false, + Type: "number", + }, + "wait-timeout": { + Description: "Seconds to wait for the machine to stop", + Required: false, + Type: "number", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"machine", "stop"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("missing required argument: id") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + if signal, ok := args["signal"]; ok { + cmdArgs = append(cmdArgs, "--signal", signal) + } + + if timeoutStr, ok := args["timeout"]; ok { + cmdArgs = append(cmdArgs, "--timeout", timeoutStr) + } + + if waitTimeoutStr, ok := args["wait-timeout"]; ok { + cmdArgs = append(cmdArgs, "--wait-timeout", waitTimeoutStr) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-machine-suspend", + ToolDescription: "Suspend a Fly machine", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: false, + Type: "string", + }, + "id": { + Description: "ID of the machine to suspend", + 
Required: true,
+ Type: "string",
+ },
+ "wait-timeout": {
+ Description: "Seconds to wait for the machine to suspend",
+ Required: false,
+ Type: "number",
+ },
+ },
+
+ Builder: func(args map[string]string) ([]string, error) {
+ cmdArgs := []string{"machine", "suspend"}
+
+ if id, ok := args["id"]; ok {
+ cmdArgs = append(cmdArgs, id)
+ } else {
+ return nil, fmt.Errorf("missing required argument: id")
+ }
+
+ if app, ok := args["app"]; ok {
+ cmdArgs = append(cmdArgs, "-a", app)
+ }
+
+ if waitTimeoutStr, ok := args["wait-timeout"]; ok {
+ cmdArgs = append(cmdArgs, "--wait-timeout", waitTimeoutStr)
+ }
+
+ return cmdArgs, nil
+ },
+ },
+
+ {
+ ToolName: "fly-machine-uncordon",
+ ToolDescription: "Reactivate all services on a machine",
+ ToolArgs: map[string]FlyArg{
+ "app": {
+ Description: "Name of the app",
+ Required: false,
+ Type: "string",
+ },
+ "id": {
+ Description: "ID of the machine to uncordon",
+ Required: true,
+ Type: "string",
+ },
+ },
+
+ Builder: func(args map[string]string) ([]string, error) {
+ cmdArgs := []string{"machine", "uncordon"}
+
+ if id, ok := args["id"]; ok {
+ cmdArgs = append(cmdArgs, id)
+ } else {
+ return nil, fmt.Errorf("missing required argument: id")
+ }
+
+ if app, ok := args["app"]; ok {
+ cmdArgs = append(cmdArgs, "-a", app)
+ }
+
+ return cmdArgs, nil
+ },
+ },
+
+ {
+ ToolName: "fly-machine-update",
+ ToolDescription: "Update a machine",
+ ToolArgs: map[string]FlyArg{
+ // missing: build-depot, build-nixpacks, container, dockerfile, file-literal, file-local, file-secret,
+ // kernel-arg, machine-config
+ "app": {
+ Description: "Name of the app",
+ Required: true,
+ Type: "string",
+ },
+ "id": {
+ Description: "Machine ID",
+ Required: true,
+ Type: "string",
+ },
+ "autostart": {
+ Description: "Automatically start a stopped Machine when a network request is received",
+ Required: false,
+ Type: "boolean",
+ Default: "true",
+ },
+ "autostop": {
+ Description: "Automatically stop a Machine when there are no network requests for it",
+ Required: false,
+ Type: "enum",
+ Enum: []string{"off", "stop", "suspend"},
+ Default: "off",
+ },
+ "command": {
+ Description: "Command to run on the machine",
+ Required: false,
+ Type: "string",
+ },
+ "entrypoint": {
+ Description: "The command to override the Docker ENTRYPOINT",
+ Required: false,
+ Type: "string",
+ },
+ "env": {
+ Description: "Set of environment variables in the form of NAME=VALUE pairs.",
+ Required: false,
+ Type: "array",
+ },
+ "host-dedication-id": {
+ Description: "The dedication id of the reserved hosts for your organization (if any)",
+ Required: false,
+ Type: "string",
+ },
+ "image": {
+ Description: "The image to use for the new machine",
+ Required: false,
+ Type: "string",
+ },
+ "metadata": {
+ Description: "Set of metadata in the form of NAME=VALUE pairs.",
+ Required: false,
+ Type: "array",
+ },
+ "port": {
+ Description: "The external ports and handlers for services, in the format: port[:machinePort][/protocol[:handler[:handler...]]])",
+ Required: false,
+ Type: "array",
+ },
+ "restart": {
+ Description: "Restart policy for the new machine",
+ Required: false,
+ Type: "enum",
+ Enum: []string{"no", "always", "on-fail"},
+ },
+ "schedule": {
+ Description: "Schedule for the new machine",
+ Required: false,
+ Type: "enum",
+ Enum: []string{"hourly", "daily", "monthly"},
+ },
+ "skip-dns-registration": {
+ Description: "Skip DNS registration for the new machine",
+ Required: false,
+ Type: "boolean",
+ },
+ "standby-for": {
+ Description: "For Machines without 
services, a comma separated list of Machine IDs to act as standby for.",
+ Required: false,
+ Type: "array",
+ },
+ "use-zstd": {
+ Description: "Use zstd compression for the image",
+ Required: false,
+ Type: "boolean",
+ },
+ "vm-cpu-kind": {
+ Description: "The CPU kind to use for the new machine",
+ Required: false,
+ Type: "enum",
+ Enum: []string{"shared", "dedicated"},
+ },
+ "vm-cpus": {
+ Description: "The number of CPUs to use for the new machine",
+ Required: false,
+ Type: "number",
+ },
+ "vm-gpu-kind": {
+ Description: "If set, the GPU model to attach",
+ Required: false,
+ Type: "enum",
+ Enum: []string{"a100-pcie-40gb", "a100-sxm4-80gb", "l40s", "a10", "none"},
+ },
+ "vm-gpus": {
+ Description: "The number of GPUs to use for the new machine",
+ Required: false,
+ Type: "number",
+ },
+ "vm-memory": {
+ Description: "The amount of memory (in megabytes) to use for the new machine",
+ Required: false,
+ Type: "number",
+ },
+ "vm-size": {
+ Description: `The VM size to set machines to. See "fly platform vm-sizes" for valid values`,
+ Required: false,
+ Type: "string",
+ },
+ "wait-timeout": {
+ Description: "Seconds to wait for the machine to update",
+ Required: false,
+ Type: "number",
+ },
+ },
+
+ Builder: func(args map[string]string) ([]string, error) {
+ cmdArgs := []string{"machine", "update"}
+
+ if id, ok := args["id"]; ok {
+ cmdArgs = append(cmdArgs, id)
+ } else {
+ return nil, fmt.Errorf("missing required argument: id")
+ }
+
+ if image, ok := args["image"]; ok {
+ cmdArgs = append(cmdArgs, "--image", image)
+ }
+
+ if app, ok := args["app"]; ok {
+ cmdArgs = append(cmdArgs, "-a", app)
+ } else {
+ return nil, fmt.Errorf("missing required argument: app")
+ }
+
+ if autostart, ok := args["autostart"]; ok {
+ value, err := strconv.ParseBool(autostart)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for autostart: %v", err)
+ } else if value {
+ cmdArgs = append(cmdArgs, "--autostart")
+ }
+ }
+
+ if autostop, ok := args["autostop"]; ok {
+ cmdArgs = append(cmdArgs, "--autostop", autostop)
+ }
+
+ if entrypoint, ok := args["entrypoint"]; ok {
+ cmdArgs = append(cmdArgs, "--entrypoint", entrypoint)
+ }
+
+ if env, ok := args["env"]; ok {
+ cmdArgs = append(cmdArgs, "--env", env)
+ }
+
+ if hostDedicationID, ok := args["host-dedication-id"]; ok {
+ cmdArgs = append(cmdArgs, "--host-dedication-id", hostDedicationID)
+ }
+
+ if metadata, ok := args["metadata"]; ok {
+ cmdArgs = append(cmdArgs, "--metadata", metadata)
+ }
+
+ if port, ok := args["port"]; ok {
+ cmdArgs = append(cmdArgs, "--port", port)
+ }
+
+ if restart, ok := args["restart"]; ok {
+ cmdArgs = append(cmdArgs, "--restart", restart)
+ }
+
+ if schedule, ok := args["schedule"]; ok {
+ cmdArgs = append(cmdArgs, "--schedule", schedule)
+ }
+
+ if skipDnsRegistration, ok := args["skip-dns-registration"]; ok {
+ value, err := strconv.ParseBool(skipDnsRegistration)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value for skip-dns-registration: %v", err)
+ } else if value {
+ cmdArgs = append(cmdArgs, "--skip-dns-registration")
+ }
+ }
+
+ if standbyFor, ok := args["standby-for"]; ok {
+ cmdArgs = append(cmdArgs, "--standby-for", standbyFor)
+ }
+
+ if useZstd, ok := args["use-zstd"]; ok {
+ value, err := strconv.ParseBool(useZstd)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value 
for use-zstd: %v", err) + } else if value { + cmdArgs = append(cmdArgs, "--use-zstd") + } + } + + if vmCpuKind, ok := args["vm-cpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpu-kind", vmCpuKind) + } + + if vmCpus, ok := args["vm-cpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-cpus", vmCpus) + } + + if vmGpuKind, ok := args["vm-gpu-kind"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpu-kind", vmGpuKind) + } + + if vmGpus, ok := args["vm-gpus"]; ok { + cmdArgs = append(cmdArgs, "--vm-gpus", vmGpus) + } + + if vmMemory, ok := args["vm-memory"]; ok { + cmdArgs = append(cmdArgs, "--vm-memory", vmMemory) + } + + if vmSize, ok := args["vm-size"]; ok { + cmdArgs = append(cmdArgs, "--vm-size", vmSize) + } + + if waitTimeout, ok := args["wait-timeout"]; ok { + cmdArgs = append(cmdArgs, "--wait-timeout", waitTimeout) + } + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/orgs.go b/internal/command/mcp/server/orgs.go new file mode 100644 index 0000000000..99914d4031 --- /dev/null +++ b/internal/command/mcp/server/orgs.go @@ -0,0 +1,167 @@ +package mcpServer + +import "fmt" + +var OrgCommands = []FlyCommand{ + { + ToolName: "fly-orgs-create", + ToolDescription: "Create a new organization. Other users can be invited to join the organization later.", + ToolArgs: map[string]FlyArg{ + "name": { + Description: "Name of the organization", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "create"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + return nil, fmt.Errorf("missing required argument: name") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-orgs-delete", + ToolDescription: "Delete an organization. All apps and machines in the organization will be deleted.", + ToolArgs: map[string]FlyArg{ + "slug": { + Description: "Slug of the organization to delete", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "delete"} + + if slug, ok := args["slug"]; ok { + cmdArgs = append(cmdArgs, slug) + } else { + return nil, fmt.Errorf("missing required argument: slug") + } + + cmdArgs = append(cmdArgs, "--yes") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-orgs-invite", + ToolDescription: "Invite a user, by email, to join organization. The invitation will be sent, and the user will be pending until they respond.", + ToolArgs: map[string]FlyArg{ + "slug": { + Description: "Slug of the organization to invite the user to", + Required: true, + Type: "string", + }, + "email": { + Description: "Email address of the user to invite", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "invite"} + + if slug, ok := args["slug"]; ok { + cmdArgs = append(cmdArgs, slug) + } else { + return nil, fmt.Errorf("missing required argument: slug") + } + + if email, ok := args["email"]; ok { + cmdArgs = append(cmdArgs, email) + } else { + return nil, fmt.Errorf("missing required argument: email") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-orgs-list", + ToolDescription: "List all organizations the user is a member of. 
Keys are names of the organizations, values are slugs.", + ToolArgs: map[string]FlyArg{}, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "list", "--json"} + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-orgs-remove", + ToolDescription: "Remove a user from an organization. The user will no longer have access to the organization.", + ToolArgs: map[string]FlyArg{ + "slug": { + Description: "Slug of the organization to remove the user from", + Required: true, + Type: "string", + }, + "email": { + Description: "Email address of the user to remove", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "remove"} + + if slug, ok := args["slug"]; ok { + cmdArgs = append(cmdArgs, slug) + } else { + return nil, fmt.Errorf("missing required argument: slug") + } + + if email, ok := args["email"]; ok { + cmdArgs = append(cmdArgs, email) + } else { + return nil, fmt.Errorf("missing required argument: email") + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-orgs-show", + ToolDescription: "Shows information about an organization. Includes name, slug and type. Summarizes user permissions, DNS zones and associated member. Details full list of members and roles.", + ToolArgs: map[string]FlyArg{ + "slug": { + Description: "Slug of the organization to show", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"orgs", "show"} + + if slug, ok := args["slug"]; ok { + cmdArgs = append(cmdArgs, slug) + } else { + return nil, fmt.Errorf("missing required argument: slug") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/platform.go b/internal/command/mcp/server/platform.go new file mode 100644 index 0000000000..aa4409cb6e --- /dev/null +++ b/internal/command/mcp/server/platform.go @@ -0,0 +1,38 @@ +package mcpServer + +import "github.com/superfly/flyctl/internal/command/platform" + +var PlatformCommands = []FlyCommand{ + { + ToolName: "fly-platform-regions", + ToolDescription: platform.RegionsCommandDesc, + ToolArgs: map[string]FlyArg{}, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"platform", "regions", "--json"} + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-platform-status", + ToolDescription: "Get the status of Fly's platform", + ToolArgs: map[string]FlyArg{}, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"platform", "status", "--json"} + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-platform-vm-sizes", + ToolDescription: "Get a list of VM sizes available for Fly apps", + ToolArgs: map[string]FlyArg{}, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"platform", "vm-sizes", "--json"} + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/secrets.go b/internal/command/mcp/server/secrets.go new file mode 100644 index 0000000000..053ba8a5b2 --- /dev/null +++ b/internal/command/mcp/server/secrets.go @@ -0,0 +1,125 @@ +package mcpServer + +import ( + "fmt" + "strings" + + "github.com/google/shlex" +) + +var SecretsCommands = []FlyCommand{ + { + ToolName: "fly-secrets-deploy", + ToolDescription: "Deploy secrets to the specified app", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + Builder: func(args 
map[string]string) ([]string, error) { + cmdArgs := []string{"secrets", "deploy"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-secrets-list", + ToolDescription: "List secrets for the specified app", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"secrets", "list", "--json"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("missing required argument: app") + } + + return cmdArgs, nil + }, + }, + { + ToolName: "fly-secrets-set", + ToolDescription: "Set secrets for the specified app; secrets are staged for the next deploy", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "keyvalues": { + Description: "Secrets to set in KEY=VALUE format", + Required: true, + Type: "hash", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"secrets", "set", "--stage"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + keyvalues, ok := args["keyvalues"] + if ok && keyvalues != "" { + args, err := shlex.Split(keyvalues) + if err != nil { + return nil, fmt.Errorf("failed to parse keyvalues: %w", err) + } + cmdArgs = append(cmdArgs, args...) + } + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-secrets-unset", + ToolDescription: "Unset secrets for the specified app; changes are staged for the next deploy", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "names": { + Description: "Names of secrets to unset", + Required: true, + Type: "array", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"secrets", "unset", "--stage"} + + app, ok := args["app"] + if !ok || app == "" { + return nil, fmt.Errorf("missing required argument: app") + } + cmdArgs = append(cmdArgs, "-a", app) + + names, ok := args["names"] + if ok && names != "" { + cmdArgs = append(cmdArgs, strings.Split(names, ",")...) + } + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/status.go b/internal/command/mcp/server/status.go new file mode 100644 index 0000000000..a96e54ceb7 --- /dev/null +++ b/internal/command/mcp/server/status.go @@ -0,0 +1,24 @@ +package mcpServer + +var StatusCommands = []FlyCommand{ + { + ToolName: "fly-status", + ToolDescription: "Get status of a Fly.io app", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + }, + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"status", "--json"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/server/types.go b/internal/command/mcp/server/types.go new file mode 100644 index 0000000000..e8e81b95a5 --- /dev/null +++ b/internal/command/mcp/server/types.go @@ -0,0 +1,34 @@ +package mcpServer + +// This file defines the structure and types used for Fly commands in the MCP server. 
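+//
+// For orientation, a command definition pairs the tool metadata with a Builder that
+// turns the (already string-encoded) arguments into flyctl CLI arguments. A minimal,
+// hypothetical sketch (the tool name and argument below are illustrative only, not
+// one of the commands registered in this package):
+//
+//	FlyCommand{
+//		ToolName:        "fly-example-status",
+//		ToolDescription: "Illustrative only",
+//		ToolArgs: map[string]FlyArg{
+//			"app": {Description: "Name of the app", Required: true, Type: "string"},
+//		},
+//		Builder: func(args map[string]string) ([]string, error) {
+//			app, ok := args["app"]
+//			if !ok {
+//				return nil, fmt.Errorf("missing required argument: app")
+//			}
+//			return []string{"status", "-a", app, "--json"}, nil
+//		},
+//	}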
+// As JSON-RPC schema wrapped in MCP go functions is a bit verbose, we define a simpler +// structure here to make it easier to define and dispatch commands. This contains only +// the things needed to build the command line arguments for the flyctl CLI. + +// mcp.runServer defines each tool based on the definition found in the FlyCommand struct. + +// The tool function is responsible for converting the arguments into a slice of strings +// that can be passed to the Builder. This function should return an error if the arguments +// are invalid or if there is an issue building the command line arguments. + +// Argument values passed to the Builder are intended to be passed to exec.Command, and therefore +// are strings. The builder is responsible for constructing a flyctl command from the arguments, +// expressed as a slice of strings. The builder should return an error if there is an issue +// building the command line arguments, or if the arguments are invalid. + +// FlyCommand represents a command for the Fly MCP server +type FlyCommand struct { + ToolName string + ToolDescription string + ToolArgs map[string]FlyArg + Builder func(args map[string]string) ([]string, error) +} + +// FlyArg represents an argument for a Fly command +type FlyArg struct { + Description string + Required bool + Type string // "string", "enum", "array", "number", "boolean" + Default string + Enum []string +} diff --git a/internal/command/mcp/server/volumes.go b/internal/command/mcp/server/volumes.go new file mode 100644 index 0000000000..16fe4fa79e --- /dev/null +++ b/internal/command/mcp/server/volumes.go @@ -0,0 +1,426 @@ +package mcpServer + +import ( + "fmt" + "strconv" +) + +var VolumeCommands = []FlyCommand{ + { + ToolName: "fly-volumes-create", + ToolDescription: "Create a new volume for an app. Volumes are persistent storage for Fly Machines.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "name": { + Description: "name of the volume", + Required: true, + Type: "string", + }, + "encrypt": { + Description: "Encrypt the volume", + Required: false, + Type: "boolean", + Default: "true", + }, + "region": { + Description: "Region to create the volume in", + Required: true, + Type: "string", + }, + "size": { + Description: "Size of the volume in GB", + Required: false, + Type: "number", + Default: "1", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "create"} + + if name, ok := args["name"]; ok { + cmdArgs = append(cmdArgs, name) + } else { + return nil, fmt.Errorf("name argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + if encrypt, ok := args["encrypt"]; ok { + encryptBool, err := strconv.ParseBool(encrypt) + if err != nil { + return nil, fmt.Errorf("invalid value for encrypt: %v", err) + } else if !encryptBool { + cmdArgs = append(cmdArgs, "--no-encryption") + } + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } else { + return nil, fmt.Errorf("region argument is required") + } + + if size, ok := args["size"]; ok { + cmdArgs = append(cmdArgs, "--size", size) + } + + cmdArgs = append(cmdArgs, "--yes", "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-destroy", + ToolDescription: "Destroy one or more volumes. 
When you destroy a volume, you permanently delete all its data.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "destroy"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + cmdArgs = append(cmdArgs, "--yes", "--verbose") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-extend", + ToolDescription: "Extend a volume to a larger size. You can only extend a volume to a larger size.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + "size": { + Description: "Size of the volume in Gigabytes", + Required: true, + Type: "number", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "extend"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + if size, ok := args["size"]; ok { + cmdArgs = append(cmdArgs, "--size", size) + } else { + return nil, fmt.Errorf("size argument is required") + } + + cmdArgs = append(cmdArgs, "--yes", "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-fork", + ToolDescription: "Fork the specified volume. Volume forking creates an independent copy of a storage volume for backup, testing, and experimentation without altering the original data.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + "region": { + Description: "Region to create the new volume in", + Required: false, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "fork"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + if region, ok := args["region"]; ok { + cmdArgs = append(cmdArgs, "--region", region) + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-list", + ToolDescription: "List all volumes for an app. 
Volumes are persistent storage for Fly Machines.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "all": { + Description: "Show all volumes, including those that in destroyed states", + Required: false, + Type: "boolean", + Default: "false", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "list"} + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + if all, ok := args["all"]; ok { + allBool, err := strconv.ParseBool(all) + if err != nil { + return nil, fmt.Errorf("invalid value for all: %v", err) + } else if allBool { + cmdArgs = append(cmdArgs, "--all") + } + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-show", + ToolDescription: "Show details about a volume. Volumes are persistent storage for Fly Machines.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "show"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-snapshots-create", + ToolDescription: "Create a snapshot of a volume. Snapshots are point-in-time copies of a volume.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "snapshots", "create"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-snapshots-list", + ToolDescription: "List all snapshots for a volume. Snapshots are point-in-time copies of a volume.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "snapshots", "list"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, + + { + ToolName: "fly-volumes-update", + ToolDescription: "Update a volume. 
You can activate or deactivate snapshotting, and change the snapshot's retention period.", + ToolArgs: map[string]FlyArg{ + "app": { + Description: "Name of the app", + Required: true, + Type: "string", + }, + "id": { + Description: "id of the volume", + Required: true, + Type: "string", + }, + "scheduled-snapshots": { + Description: "Enable or disable scheduled snapshots", + Required: false, + Type: "boolean", + }, + "snapshot-retention": { + Description: "Retention period for snapshots in days", + Required: false, + Type: "number", + }, + }, + + Builder: func(args map[string]string) ([]string, error) { + cmdArgs := []string{"volume", "update"} + + if id, ok := args["id"]; ok { + cmdArgs = append(cmdArgs, id) + } else { + return nil, fmt.Errorf("id argument is required") + } + + if app, ok := args["app"]; ok { + cmdArgs = append(cmdArgs, "-a", app) + } else { + return nil, fmt.Errorf("app argument is required") + } + + if scheduledSnapshots, ok := args["scheduled-snapshots"]; ok { + scheduledSnapshotsBool, err := strconv.ParseBool(scheduledSnapshots) + if err != nil { + return nil, fmt.Errorf("invalid value for scheduled-snapshots: %v", err) + } else if scheduledSnapshotsBool { + cmdArgs = append(cmdArgs, "--scheduled-snapshots=true") + } else { + cmdArgs = append(cmdArgs, "--scheduled-snapshots=false") + } + } + + if snapshotRetention, ok := args["snapshot-retention"]; ok { + cmdArgs = append(cmdArgs, "--snapshot-retention", snapshotRetention) + } + + cmdArgs = append(cmdArgs, "--json") + + return cmdArgs, nil + }, + }, +} diff --git a/internal/command/mcp/volume.go b/internal/command/mcp/volume.go new file mode 100644 index 0000000000..23d6e4d14d --- /dev/null +++ b/internal/command/mcp/volume.go @@ -0,0 +1,139 @@ +package mcp + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/apex/log" + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +// newVolumeCommand creates the 'volume' command for flyctl. 
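+// The flags below are combined by runVolume into a single --volume mount string of the
+// form "source:destination:opt=val,...", which is passed through to "fly mcp launch".
+// With the default flag values that string is
+// "data:/data:initial_size=1GB,auto_extend_size_threshold=80,auto_extend_size_increment=1GB,auto_extend_size_limit=10GB".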
+func newVolume() *cobra.Command { + const ( + short = "[experimental] mount a fly volume" + long = short + "\n" + usage = "volume" + ) + + cmd := command.New(usage, short, long, runVolume) + cmd.Args = cobra.ExactArgs(0) + cmd.Hidden = true + + flag.Add(cmd, + flag.String{ + Name: "source", + Description: "Source of the volume", + Default: "data", + }, + flag.String{ + Name: "destination", + Description: "Destination path in the container", + Default: "/data", + }, + flag.String{ + Name: "initial-size", + Description: "Initial size of the volume", + Default: "1GB", + }, + flag.Int{ + Name: "auto-extend-size-threshold", + Description: "Auto extend size threshold percentage", + Default: 80, + }, + flag.String{ + Name: "auto-extend-size-increment", + Description: "Auto extend size increment", + Default: "1GB", + }, + flag.String{ + Name: "auto-extend-size-limit", + Description: "Auto extend size limit", + Default: "10GB", + }, + flag.Int{ + Name: "snapshot-retention", + Description: "Snapshot retention period in days", + Default: 0, + }, + flag.String{ + Name: "server", + Description: "Name to use for the MCP server in the MCP client configuration", + Default: "volume", + }, + ) + + for client, name := range McpClients { + flag.Add(cmd, + flag.Bool{ + Name: client, + Description: "Add MCP server to the " + name + " client configuration", + }, + ) + } + + return cmd +} + +// runVolume is the command handler for the 'volume' command +func runVolume(ctx context.Context) error { + volume := flag.GetString(ctx, "source") + ":" + flag.GetString(ctx, "destination") + + options := []string{} + + if initialSize := flag.GetString(ctx, "initial-size"); initialSize != "" { + options = append(options, "initial_size="+initialSize) + } + if autoExtendSizeThreshold := flag.GetInt(ctx, "auto-extend-size-threshold"); autoExtendSizeThreshold != 0 { + options = append(options, "auto_extend_size_threshold="+strconv.Itoa(autoExtendSizeThreshold)) + } + + if autoExtendSizeIncrement := flag.GetString(ctx, "auto-extend-size-increment"); autoExtendSizeIncrement != "" { + options = append(options, "auto_extend_size_increment="+autoExtendSizeIncrement) + } + + if autoExtendSizeLimit := flag.GetString(ctx, "auto-extend-size-limit"); autoExtendSizeLimit != "" { + options = append(options, "auto_extend_size_limit="+autoExtendSizeLimit) + } + + if snapshotRetention := flag.GetInt(ctx, "snapshot-retention"); snapshotRetention != 0 { + options = append(options, "snapshot_retention="+strconv.Itoa(snapshotRetention)) + } + + if len(options) > 0 { + volume += ":" + strings.Join(options, ",") + } + + args := []string{ + "mcp", + "launch", + `npx -y @modelcontextprotocol/server-filesystem ` + flag.GetString(ctx, "destination"), + "--server", flag.GetString(ctx, "server"), + "--volume", volume, + } + + // Add the MCP server to the MCP client configurations + for client := range McpClients { + if flag.GetBool(ctx, client) { + log.Debugf("Adding %s to MCP client configuration", client) + args = append(args, "--"+client) + } + } + + for _, config := range flag.GetStringArray(ctx, "config") { + if config != "" { + log.Debugf("Adding %s to MCP client configuration", config) + args = append(args, "--config", config) + } + } + + if err := flyctl(args...); err != nil { + return fmt.Errorf("failed to launch MCP volume: %w", err) + } + + return nil +} diff --git a/internal/command/mcp/wrap.go b/internal/command/mcp/wrap.go new file mode 100644 index 0000000000..8d163f2d21 --- /dev/null +++ b/internal/command/mcp/wrap.go @@ -0,0 +1,397 @@ 
+package mcp + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + "syscall" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +// This program is a simple HTTP server that forwards POST requests to an MCP stdio program, +// and streams the program's output back to the client. It uses Server-Sent Events (SSE) +// to push updates from the server to the client. +// +// It is a streamlined version of the MCP proxy server, focusing on a single session: +// See https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#streamable-http + +// Server handles HTTP requests and communicates with the remote program +type Server struct { + port int + mcp string + token string + user string + password string + private bool + cmd *exec.Cmd + args []string + stdin io.WriteCloser + stdout io.ReadCloser + mutex sync.Mutex + client chan string +} + +func NewWrap() *cobra.Command { + const ( + short = "[experimental] Wrap an MCP stdio program" + long = short + `. Options passed after double dashes ("--") will be passed to the MCP program. If user is specified, HTTP authentication will be required.` + "\n" + usage = "wrap" + ) + + cmd := command.New(usage, short, long, runWrap) + cmd.Args = cobra.ExactArgs(0) + + flag.Add(cmd, + flag.Int{ + Name: "port", + Description: "Port to listen on.", + Default: 8080, + Shorthand: "p", + }, + flag.String{ + Name: "mcp", + Description: "Path to the stdio MCP program to be wrapped.", + Shorthand: "m", + }, + flag.String{ + Name: "bearer-token", + Description: "Bearer token to authenticate with. Defaults to the value of the FLY_MCP_BEARER_TOKEN environment variable.", + }, + flag.String{ + Name: "user", + Description: "User to authenticate with. Defaults to the value of the FLY_MCP_USER environment variable.", + }, + flag.String{ + Name: "password", + Description: "Password to authenticate with. 
Defaults to the value of the FLY_MCP_PASSWORD environment variable.", + }, + flag.Bool{ + Name: "private", + Description: "Use private networking.", + }, + ) + + return cmd +} + +func runWrap(ctx context.Context) error { + token, _ := os.LookupEnv("FLY_MCP_BEARER_TOKEN") + user, _ := os.LookupEnv("FLY_MCP_USER") + password, _ := os.LookupEnv("FLY_MCP_PASSWORD") + _, private := os.LookupEnv("FLY_MCP_PRIVATE") + + if token == "" { + token = flag.GetString(ctx, "bearer-token") + } + + if user == "" { + user = flag.GetString(ctx, "user") + } + + if password == "" { + password = flag.GetString(ctx, "password") + } + + // Create server + server := &Server{ + port: flag.GetInt(ctx, "port"), + token: token, + user: user, + password: password, + private: flag.GetBool(ctx, "private") || private, + mcp: flag.GetString(ctx, "mcp"), + args: flag.ExtraArgsFromContext(ctx), + client: nil, + } + + // if user and password are not set, try to get them from environment variables + if server.user == "" { + server.user = os.Getenv("FLY_MCP_USER") + } + + if server.password == "" { + server.password = os.Getenv("FLY_MCP_PASSWORD") + } + + // Start the program + if err := server.StartProgram(); err != nil { + log.Fatalf("Error starting program: %v", err) + } + defer server.StopProgram() + + // Start reading from the program's stdout + go server.ReadFromProgram() + + // Set up HTTP server + http.HandleFunc("/", server.HandleHTTPRequest) + address := fmt.Sprintf(":%d", server.port) + + log.Printf("Starting server on %s, forwarding to stdio MCP: %s", address, server.mcp) + if err := http.ListenAndServe(address, nil); err != nil { + log.Fatalf("Error starting server: %v", err) + } + + return nil +} + +// StartProgram starts the remote program and connects to its stdin/stdout +func (s *Server) StartProgram() error { + command := s.mcp + args := s.args + + if command == "" { + if len(args) == 0 { + return fmt.Errorf("no command specified") + } + + command = args[0] + args = args[1:] + } + + cmd := exec.Command(command, args...) 
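+ // Note: cmd.Env is left unset, so the wrapped MCP program inherits this process's
+ // environment; only its stdin/stdout are piped below, and stderr passes straight through.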
+ + // Get stdin pipe + stdin, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("error getting stdin pipe: %w", err) + } + s.stdin = stdin + + // Get stdout pipe + stdout, err := cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("error getting stdout pipe: %w", err) + } + s.stdout = stdout + + // Redirect stderr to our stderr + cmd.Stderr = os.Stderr + + // Start the command + if err := cmd.Start(); err != nil { + return fmt.Errorf("error starting program: %w", err) + } + + s.cmd = cmd + + // Monitor program exit + go func() { + err := cmd.Wait() + if err != nil { + log.Printf("Program exited with error: %v", err) + } else { + log.Println("Program exited normally") + } + }() + + return nil +} + +// StopProgram stops the remote program +func (s *Server) StopProgram() { + if s.cmd != nil && s.cmd.Process != nil { + log.Println("Stopping program") + if err := s.cmd.Process.Kill(); err != nil { + log.Printf("Error killing program: %v", err) + } + } +} + +// ReadFromProgram continuously reads from the program's stdout +func (s *Server) ReadFromProgram() { + stp := make(chan os.Signal, 1) + signal.Notify(stp, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stp + s.StopProgram() + os.Exit(0) + }() + + scanner := bufio.NewScanner(s.stdout) + const ( + defaultBufSize = bufio.MaxScanTokenSize // 64KiB + maxResponseSize = 10 * 1024 * 1024 // 10MiB + ) + scanner.Buffer(make([]byte, 0, defaultBufSize), maxResponseSize) + for scanner.Scan() { + line := scanner.Text() + "\n" + + // Forward message to waiting client + s.mutex.Lock() + if s.client != nil { + s.client <- line + } else { + log.Printf("No client waiting") + } + s.mutex.Unlock() + } + + if err := scanner.Err(); err != nil { + log.Printf("Error reading from program: %v", err) + } else { + log.Println("Program output stream closed") + } + + // Close stdin to signal EOF to the program + if err := s.stdin.Close(); err != nil { + log.Printf("Error closing stdin: %v", err) + } + // Close stdout to signal EOF to the program + if err := s.stdout.Close(); err != nil { + log.Printf("Error closing stdout: %v", err) + } +} + +// HandleHTTPRequest handles incoming HTTP requests +func (s *Server) HandleHTTPRequest(w http.ResponseWriter, r *http.Request) { + debugLog := os.Getenv("LOG_LEVEL") == "debug" + + // Access logging + if debugLog { + log.Printf("Incoming request: %s %s", r.Method, r.URL.Path) + for name, values := range r.Header { + if strings.EqualFold(name, "Authorization") { + log.Printf("Header: %s: [REDACTED]", name) + } else { + log.Printf("Header: %s: %v", name, values) + } + } + } + + if s.private { + clientIP := r.Header.Get("Fly-Client-Ip") + if clientIP != "" && !strings.HasPrefix(clientIP, "fdaa:") { + http.Error(w, "Forbidden", http.StatusForbidden) + return + } + } + + if s.token != "" { + // Check for bearer token + bearerToken := r.Header.Get("Authorization") + if bearerToken == "" || !strings.HasPrefix(bearerToken, "Bearer ") || strings.TrimSpace(bearerToken[7:]) != s.token { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + } else if s.user != "" { + // Check for basic authentication + user, password, ok := r.BasicAuth() + if !ok || user != s.user || password != s.password { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + } + + // Handle GET requests + if r.Method == http.MethodGet { + + // Respond to HTML requests with a simple message + acceptHeader := r.Header.Get("Accept") + if strings.Contains(acceptHeader, "html") && !strings.Contains(acceptHeader, "json") { + 
w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte("MCP Server")) + return + } + + // Create channel for response + responseCh := make(chan string, 1) + s.mutex.Lock() + if s.client == nil { + s.client = responseCh + } + s.mutex.Unlock() + + if s.client != responseCh { + // If we already have a client, return an error + http.Error(w, "Another client is already connected", http.StatusConflict) + return + } + + // Set headers for SSE + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + w.WriteHeader(http.StatusOK) + + w.(http.Flusher).Flush() // Flush headers to the client + + // Stream responses to the client + for { + select { + case response := <-responseCh: + w.Write([]byte(response)) + w.(http.Flusher).Flush() // Flush the response to the client + case <-r.Context().Done(): + // Request was cancelled + s.mutex.Lock() + s.client = nil + s.mutex.Unlock() + return + } + } + + } else if r.Method == http.MethodPost { + if debugLog { + // Capture request body for logging + var bodyBuf bytes.Buffer + r.Body = io.NopCloser(io.TeeReader(r.Body, &bodyBuf)) + log.Printf("Request body: %s", bodyBuf.String()) + } + + // Stream request body to program's stdin, but inspect the last byte + var lastByte byte + buf := make([]byte, 4096) + for { + n, err := r.Body.Read(buf) + if n > 0 { + lastByte = buf[n-1] + if _, werr := s.stdin.Write(buf[:n]); werr != nil { + log.Printf("Error writing to program: %v", werr) + http.Error(w, fmt.Sprintf("Error writing to program: %v", werr), http.StatusInternalServerError) + return + } + } + if err == io.EOF { + break + } + if err != nil { + log.Printf("Error reading request body: %v", err) + http.Error(w, fmt.Sprintf("Error reading request body: %v", err), http.StatusBadRequest) + return + } + } + + // Ensure the last byte is a newline + if lastByte != '\n' { + s.stdin.Write([]byte{'\n'}) + } + + if f, ok := s.stdin.(interface{ Flush() error }); ok { + if err := f.Flush(); err != nil { + log.Printf("Error flushing stdin: %v", err) + } + } + + // Successfully wrote to program + w.WriteHeader(http.StatusAccepted) + r.Body.Close() + + } else { + // Method not allowed + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } +} diff --git a/internal/command/mpg/attach.go b/internal/command/mpg/attach.go new file mode 100644 index 0000000000..e5e5ecfcde --- /dev/null +++ b/internal/command/mpg/attach.go @@ -0,0 +1,117 @@ +package mpg + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/uiexutil" + "github.com/superfly/flyctl/iostreams" +) + +func newAttach() *cobra.Command { + const ( + short = "Attach a managed Postgres cluster to an app" + long = short + ". 
" + + `This command will add a secret to the specified app + containing the connection string for the database.` + usage = "attach " + ) + + cmd := command.New(usage, short, long, runAttach, + command.RequireSession, + command.RequireAppName, + command.RequireUiex, + ) + cmd.Args = cobra.ExactArgs(1) + + flag.Add(cmd, + flag.App(), + flag.AppConfig(), + flag.String{ + Name: "variable-name", + Default: "DATABASE_URL", + Description: "The name of the environment variable that will be added to the attached app", + }, + ) + + return cmd +} + +func runAttach(ctx context.Context) error { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + var ( + clusterId = flag.FirstArg(ctx) + appName = appconfig.NameFromContext(ctx) + client = flyutil.ClientFromContext(ctx) + uiexClient = uiexutil.ClientFromContext(ctx) + io = iostreams.FromContext(ctx) + ) + + // Get cluster details to determine which org it belongs to + response, err := uiexClient.GetManagedClusterById(ctx, clusterId) + if err != nil { + return fmt.Errorf("failed retrieving cluster %s: %w", clusterId, err) + } + + clusterOrgSlug := response.Data.Organization.Slug + + // Get app details to determine which org it belongs to + app, err := client.GetAppBasic(ctx, appName) + if err != nil { + return fmt.Errorf("failed retrieving app %s: %w", appName, err) + } + + appOrgSlug := app.Organization.RawSlug + + // Verify that the app and cluster are in the same organization + if appOrgSlug != clusterOrgSlug { + return fmt.Errorf("app %s is in organization %s, but cluster %s is in organization %s. They must be in the same organization to attach", + appName, appOrgSlug, clusterId, clusterOrgSlug) + } + + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) + if err != nil { + return err + } + + variableName := flag.GetString(ctx, "variable-name") + + if variableName == "" { + variableName = "DATABASE_URL" + } + + // Check if the app already has the secret variable set + secrets, err := appsecrets.List(ctx, flapsClient, app.Name) + if err != nil { + return fmt.Errorf("failed retrieving secrets for app %s: %w", appName, err) + } + + for _, secret := range secrets { + if secret.Name == variableName { + return fmt.Errorf("app %s already has %s set. 
Use 'fly secrets unset %s' to remove it first", appName, variableName, variableName) + } + } + + s := map[string]string{} + s[variableName] = response.Credentials.ConnectionUri + + if err := appsecrets.Update(ctx, flapsClient, app.Name, s, nil); err != nil { + return err + } + + fmt.Fprintf(io.Out, "\nPostgres cluster %s is being attached to %s\n", clusterId, appName) + fmt.Fprintf(io.Out, "The following secret was added to %s:\n %s=%s\n", appName, variableName, response.Credentials.ConnectionUri) + + return nil +} diff --git a/internal/command/mpg/connect.go b/internal/command/mpg/connect.go new file mode 100644 index 0000000000..5e13da0e61 --- /dev/null +++ b/internal/command/mpg/connect.go @@ -0,0 +1,89 @@ +package mpg + +import ( + "context" + "fmt" + "os/exec" + + "github.com/logrusorgru/aurora" + "github.com/spf13/cobra" + + "github.com/superfly/flyctl/iostreams" + "github.com/superfly/flyctl/proxy" + + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" +) + +func newConnect() (cmd *cobra.Command) { + const ( + long = `Connect to a MPG database using psql` + + short = long + usage = "connect" + ) + + cmd = command.New(usage, short, long, runConnect, command.RequireSession, command.RequireUiex) + + flag.Add(cmd, + flag.MPGCluster(), + flag.String{ + Name: "database", + Shorthand: "d", + Description: "The database to connect to", + }, + ) + + return cmd +} + +func runConnect(ctx context.Context) (err error) { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + io := iostreams.FromContext(ctx) + + localProxyPort := "16380" + + cluster, params, credentials, err := getMpgProxyParams(ctx, localProxyPort) + if err != nil { + return err + } + + if cluster.Status != "ready" { + fmt.Fprintf(io.ErrOut, "%s Cluster is not in ready state, currently: %s\n", aurora.Yellow("WARN"), cluster.Status) + } + + psqlPath, err := exec.LookPath("psql") + if err != nil { + fmt.Fprintf(io.Out, "Could not find psql in your $PATH. 
Install it or point your psql at: %s", "someurl") + return + } + + err = proxy.Start(ctx, params) + if err != nil { + return err + } + + user := credentials.User + password := credentials.Password + db := credentials.DBName + + // Override database name if provided via flag + if database := flag.GetString(ctx, "database"); database != "" { + db = database + } + + connectUrl := fmt.Sprintf("postgresql://%s:%s@localhost:%s/%s", user, password, localProxyPort, db) + cmd := exec.CommandContext(ctx, psqlPath, connectUrl) + cmd.Stdout = io.Out + cmd.Stderr = io.ErrOut + cmd.Stdin = io.In + + cmd.Start() + cmd.Wait() + + return +} diff --git a/internal/command/mpg/create.go b/internal/command/mpg/create.go new file mode 100644 index 0000000000..daea78f34b --- /dev/null +++ b/internal/command/mpg/create.go @@ -0,0 +1,276 @@ +package mpg + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/superfly/fly-go" + "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" + "github.com/superfly/flyctl/iostreams" +) + +type CreateClusterParams struct { + Name string + OrgSlug string + Region string + Plan string + VolumeSizeGB int + PostGISEnabled bool +} + +func newCreate() *cobra.Command { + const ( + short = "Create a new Managed Postgres cluster" + long = short + "\n" + ) + + cmd := command.New("create", short, long, runCreate, + command.RequireSession, + command.RequireUiex, + ) + + flag.Add( + cmd, + flag.Region(), + flag.Org(), + flag.String{ + Name: "name", + Shorthand: "n", + Description: "The name of your Postgres cluster", + }, + flag.String{ + Name: "plan", + Description: "The plan to use for the Postgres cluster (development, production, etc)", + }, + flag.Int{ + Name: "volume-size", + Description: "The volume size in GB", + Default: 10, + }, + flag.Bool{ + Name: "enable-postgis-support", + Description: "Enable PostGIS for the Postgres cluster", + Default: false, + }, + ) + + return cmd +} + +func runCreate(ctx context.Context) error { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + var ( + io = iostreams.FromContext(ctx) + appName = flag.GetString(ctx, "name") + err error + ) + + if appName == "" { + // If no name is provided, try to get the app name from context + if appName = appconfig.NameFromContext(ctx); appName != "" { + // If we have an app name, use it to create a default database name + appName = appName + "-db" + } else { + // If no app name is available, prompt for a name + appName, err = prompt.SelectAppNameWithMsg(ctx, "Choose a database name:") + if err != nil { + return err + } + } + } + + org, err := prompt.Org(ctx) + if err != nil { + return err + } + + // Get available MPG regions from API + mpgRegions, err := GetAvailableMPGRegions(ctx, org.RawSlug) + + if err != nil { + return err + } + + if len(mpgRegions) == 0 { + return fmt.Errorf("no valid regions found for Managed Postgres") + } + + // Check if region was specified via flag + regionCode := flag.GetString(ctx, "region") + var selectedRegion *fly.Region + + if regionCode != "" { + // Find the specified region in the allowed regions + for _, region := range mpgRegions { + if region.Code == 
regionCode { + selectedRegion = ®ion + break + } + } + if selectedRegion == nil { + availableCodes, _ := GetAvailableMPGRegionCodes(ctx, org.Slug) + return fmt.Errorf("region %s is not available for Managed Postgres. Available regions: %v", regionCode, availableCodes) + } + } else { + // Create region options for prompt + var regionOptions []string + for _, region := range mpgRegions { + regionOptions = append(regionOptions, fmt.Sprintf("%s (%s)", region.Name, region.Code)) + } + + var selectedIndex int + if err := prompt.Select(ctx, &selectedIndex, "Select a region for your Managed Postgres cluster", "", regionOptions...); err != nil { + return err + } + + selectedRegion = &mpgRegions[selectedIndex] + } + + // Plan selection and validation + plan := flag.GetString(ctx, "plan") + plan = normalizePlan(plan) + if _, ok := MPGPlans[plan]; !ok { + if iostreams.FromContext(ctx).IsInteractive() { + // Prepare a sortable slice of plans + type planEntry struct { + Key string + Value PlanDetails + } + var planEntries []planEntry + for k, v := range MPGPlans { + planEntries = append(planEntries, planEntry{Key: k, Value: v}) + } + // Sort by price (convert string like "$38.00" to float) + sort.Slice(planEntries, func(i, j int) bool { + return planEntries[i].Value.PricePerMo < planEntries[j].Value.PricePerMo + }) + // Build options and keys in sorted order + var planOptions []string + var planKeys []string + for _, entry := range planEntries { + planOptions = append(planOptions, fmt.Sprintf("%s: %s, %s RAM, $%d/mo", entry.Value.Name, entry.Value.CPU, entry.Value.Memory, entry.Value.PricePerMo)) + planKeys = append(planKeys, entry.Key) + } + var selectedIndex int + if err := prompt.Select(ctx, &selectedIndex, "Select a plan for your Managed Postgres cluster", planOptions[0], planOptions...); err != nil { + return err + } + plan = planKeys[selectedIndex] + } else { + plan = "basic" // Default to basic if not interactive + } + } + + var slug string + if org.Slug == "personal" { + genqClient := flyutil.ClientFromContext(ctx).GenqClient() + + // For ui-ex request we need the real org slug + var fullOrg *gql.GetOrganizationResponse + if fullOrg, err = gql.GetOrganization(ctx, genqClient, org.Slug); err != nil { + return fmt.Errorf("failed fetching org: %w", err) + } + + slug = fullOrg.Organization.RawSlug + } else { + slug = org.Slug + } + + params := &CreateClusterParams{ + Name: appName, + OrgSlug: slug, + Region: selectedRegion.Code, + Plan: plan, + VolumeSizeGB: flag.GetInt(ctx, "volume-size"), + PostGISEnabled: flag.GetBool(ctx, "enable-postgis-support"), + } + + uiexClient := uiexutil.ClientFromContext(ctx) + + input := uiex.CreateClusterInput{ + Name: params.Name, + Region: params.Region, + Plan: params.Plan, + OrgSlug: params.OrgSlug, + Disk: params.VolumeSizeGB, + PostGISEnabled: params.PostGISEnabled, + } + + response, err := uiexClient.CreateCluster(ctx, input) + if err != nil { + return fmt.Errorf("failed creating managed postgres cluster: %w", err) + } + + clusterID := response.Data.Id + + var connectionURI string + + // Output plan details after creation + planDetails := MPGPlans[plan] + fmt.Fprintf(io.Out, "Selected Plan: %s\n", planDetails.Name) + fmt.Fprintf(io.Out, " CPU: %s\n", planDetails.CPU) + fmt.Fprintf(io.Out, " Memory: %s\n", planDetails.Memory) + fmt.Fprintf(io.Out, " Price: $%d per month\n\n", planDetails.PricePerMo) + + // Wait for cluster to be ready + fmt.Fprintf(io.Out, "Waiting for cluster %s (%s) to be ready...\n", params.Name, clusterID) + fmt.Fprintf(io.Out, "You can view 
the cluster in the UI at: https://fly.io/dashboard/%s/managed_postgres/%s\n", params.OrgSlug, clusterID) + fmt.Fprintf(io.Out, "You can cancel this wait with Ctrl+C - the cluster will continue provisioning in the background.\n") + fmt.Fprintf(io.Out, "Once ready, you can connect to the database with: fly mpg connect --cluster %s\n\n", clusterID) + for { + res, err := uiexClient.GetManagedClusterById(ctx, clusterID) + if err != nil { + return fmt.Errorf("failed checking cluster status: %w", err) + } + + cluster := res.Data + credentials := res.Credentials + + if cluster.Id == "" { + return fmt.Errorf("invalid cluster response: no cluster ID") + } + + if cluster.Status == "ready" { + connectionURI = credentials.ConnectionUri + break + } + + if cluster.Status == "error" { + return fmt.Errorf("cluster creation failed") + } + + time.Sleep(5 * time.Second) + } + + fmt.Fprintf(io.Out, "\nManaged Postgres cluster created successfully!\n") + fmt.Fprintf(io.Out, " ID: %s\n", clusterID) + fmt.Fprintf(io.Out, " Name: %s\n", params.Name) + fmt.Fprintf(io.Out, " Organization: %s\n", params.OrgSlug) + fmt.Fprintf(io.Out, " Region: %s\n", params.Region) + fmt.Fprintf(io.Out, " Plan: %s\n", params.Plan) + fmt.Fprintf(io.Out, " Disk: %dGB\n", response.Data.Disk) + fmt.Fprintf(io.Out, " PostGIS: %t\n", response.Data.PostGISEnabled) + fmt.Fprintf(io.Out, " Connection string: %s\n", connectionURI) + + return nil +} + +// normalizePlan lowercases and trims whitespace from the plan name for lookup +func normalizePlan(plan string) string { + return strings.ToLower(strings.TrimSpace(plan)) +} diff --git a/internal/command/mpg/destroy.go b/internal/command/mpg/destroy.go new file mode 100644 index 0000000000..63e0e0ae1a --- /dev/null +++ b/internal/command/mpg/destroy.go @@ -0,0 +1,81 @@ +package mpg + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/internal/uiexutil" + "github.com/superfly/flyctl/iostreams" +) + +func newDestroy() *cobra.Command { + const ( + short = "Destroy a managed Postgres cluster" + long = short + ". " + + `This command will permanently destroy a managed Postgres cluster and all its data. +This action is not reversible.` + usage = "destroy " + ) + + cmd := command.New(usage, short, long, runDestroy, + command.RequireSession, + command.RequireUiex, + ) + cmd.Args = cobra.ExactArgs(1) + cmd.Aliases = []string{"delete", "remove", "rm"} + + flag.Add(cmd, + flag.Yes(), + ) + + return cmd +} + +func runDestroy(ctx context.Context) error { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + var ( + clusterId = flag.FirstArg(ctx) + uiexClient = uiexutil.ClientFromContext(ctx) + io = iostreams.FromContext(ctx) + colorize = io.ColorScheme() + ) + + // Get cluster details to verify ownership and show info + response, err := uiexClient.GetManagedClusterById(ctx, clusterId) + if err != nil { + return fmt.Errorf("failed retrieving cluster %s: %w", clusterId, err) + } + + if !flag.GetYes(ctx) { + const msg = "Destroying a managed Postgres cluster is not reversible. All data will be permanently lost." 
+ fmt.Fprintln(io.ErrOut, colorize.Red(msg)) + + switch confirmed, err := prompt.Confirmf(ctx, "Destroy managed Postgres cluster %s from organization %s (%s)?", response.Data.Name, response.Data.Organization.Name, clusterId); { + case err == nil: + if !confirmed { + return nil + } + case prompt.IsNonInteractive(err): + return prompt.NonInteractiveError("--yes flag must be specified when not running interactively") + default: + return err + } + } + + // Destroy the cluster + err = uiexClient.DestroyCluster(ctx, response.Data.Organization.Slug, clusterId) + if err != nil { + return fmt.Errorf("failed to destroy cluster %s: %w", clusterId, err) + } + + fmt.Fprintf(io.Out, "Managed Postgres cluster %s (%s) was destroyed\n", response.Data.Name, clusterId) + return nil +} diff --git a/internal/command/mpg/list.go b/internal/command/mpg/list.go new file mode 100644 index 0000000000..456639cbce --- /dev/null +++ b/internal/command/mpg/list.go @@ -0,0 +1,93 @@ +package mpg + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/iostreams" + + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/command/orgs" + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/internal/uiexutil" +) + +func newList() *cobra.Command { + const ( + long = `List MPG clusters owned by the specified organization. +If no organization is specified, the user's personal organization is used.` + short = "List MPG clusters." + usage = "list" + ) + + cmd := command.New(usage, short, long, runList, + command.RequireSession, + command.RequireUiex, + ) + + cmd.Aliases = []string{"ls"} + + flag.Add(cmd, flag.JSONOutput()) + flag.Add(cmd, flag.Org()) + + return cmd +} + +func runList(ctx context.Context) error { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + cfg := config.FromContext(ctx) + out := iostreams.FromContext(ctx).Out + + org, err := orgs.OrgFromFlagOrSelect(ctx) + if err != nil { + return err + } + + uiexClient := uiexutil.ClientFromContext(ctx) + genqClient := flyutil.ClientFromContext(ctx).GenqClient() + + // For ui-ex request we need the real org slug + var fullOrg *gql.GetOrganizationResponse + if fullOrg, err = gql.GetOrganization(ctx, genqClient, org.Slug); err != nil { + err = fmt.Errorf("failed fetching org: %w", err) + return err + } + + clusters, err := uiexClient.ListManagedClusters(ctx, fullOrg.Organization.RawSlug) + if err != nil { + return fmt.Errorf("failed to list managed clusters for organization %s: %w", org.Slug, err) + } + + if len(clusters.Data) == 0 { + fmt.Fprintf(out, "No managed postgres clusters found in organization %s\n", org.Slug) + return nil + } + + if cfg.JSONOutput { + return render.JSON(out, clusters.Data) + } + + rows := make([][]string, 0, len(clusters.Data)) + for _, cluster := range clusters.Data { + rows = append(rows, []string{ + cluster.Id, + cluster.Name, + cluster.Organization.Slug, + cluster.Region, + cluster.Status, + cluster.Plan, + }) + } + + return render.Table(out, "", rows, "ID", "Name", "Org", "Region", "Status", "Plan") +} diff --git a/internal/command/mpg/mpg.go b/internal/command/mpg/mpg.go new file mode 100644 index 0000000000..9beac7133b --- /dev/null +++ b/internal/command/mpg/mpg.go @@ -0,0 +1,307 @@ +package mpg + +import ( + 
"context" + "fmt" + + "github.com/spf13/cobra" + fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/prompt" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" +) + +// RegionProvider interface for getting platform regions +type RegionProvider interface { + GetPlatformRegions(ctx context.Context) ([]fly.Region, error) +} + +// DefaultRegionProvider implements RegionProvider using the prompt package +type DefaultRegionProvider struct{} + +func (p *DefaultRegionProvider) GetPlatformRegions(ctx context.Context) ([]fly.Region, error) { + regionsFuture := prompt.PlatformRegions(ctx) + regions, err := regionsFuture.Get() + if err != nil { + return nil, err + } + return regions.Regions, nil +} + +// MPGService provides MPG-related functionality with injectable dependencies +type MPGService struct { + uiexClient uiexutil.Client + regionProvider RegionProvider +} + +// NewMPGService creates a new MPGService with default dependencies +func NewMPGService(ctx context.Context) *MPGService { + return &MPGService{ + uiexClient: uiexutil.ClientFromContext(ctx), + regionProvider: &DefaultRegionProvider{}, + } +} + +// NewMPGServiceWithDependencies creates a new MPGService with custom dependencies +func NewMPGServiceWithDependencies(uiexClient uiexutil.Client, regionProvider RegionProvider) *MPGService { + return &MPGService{ + uiexClient: uiexClient, + regionProvider: regionProvider, + } +} + +func New() *cobra.Command { + const ( + short = `Manage Managed Postgres clusters.` + + long = short + "\n" + ) + + cmd := command.New("mpg", short, long, nil) + + flag.Add(cmd, + flag.Org(), + ) + + cmd.AddCommand( + newProxy(), + newConnect(), + newAttach(), + newStatus(), + newList(), + newCreate(), + newDestroy(), + ) + + return cmd +} + +// ClusterFromFlagOrSelect retrieves the cluster ID from the --cluster flag. +// If the flag is not set, it prompts the user to select a cluster from the available ones for the given organization. +func ClusterFromFlagOrSelect(ctx context.Context, orgSlug string) (*uiex.ManagedCluster, error) { + clusterID := flag.GetMPGClusterID(ctx) + uiexClient := uiexutil.ClientFromContext(ctx) + + clustersResponse, err := uiexClient.ListManagedClusters(ctx, orgSlug) + if err != nil { + return nil, fmt.Errorf("failed retrieving postgres clusters: %w", err) + } + + if len(clustersResponse.Data) == 0 { + return nil, fmt.Errorf("no managed postgres clusters found in organization %s", orgSlug) + } + + if clusterID != "" { + // If a cluster ID is provided via flag, find it + for i := range clustersResponse.Data { + if clustersResponse.Data[i].Id == clusterID { + return &clustersResponse.Data[i], nil + } + } + return nil, fmt.Errorf("managed postgres cluster %q not found in organization %s", clusterID, orgSlug) + } else { + // Otherwise, prompt the user to select a cluster + var options []string + for _, cluster := range clustersResponse.Data { + options = append(options, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Region)) + } + + var index int + selectErr := prompt.Select(ctx, &index, "Select a Postgres cluster", "", options...) 
+ if selectErr != nil { + return nil, selectErr + } + return &clustersResponse.Data[index], nil + } +} + +// GetAvailableMPGRegions returns the list of regions available for Managed Postgres +func GetAvailableMPGRegions(ctx context.Context, orgSlug string) ([]fly.Region, error) { + service := NewMPGService(ctx) + return service.GetAvailableMPGRegions(ctx, orgSlug) +} + +// GetAvailableMPGRegions returns the list of regions available for Managed Postgres +func (s *MPGService) GetAvailableMPGRegions(ctx context.Context, orgSlug string) ([]fly.Region, error) { + // Get platform regions + platformRegions, err := s.regionProvider.GetPlatformRegions(ctx) + if err != nil { + return nil, err + } + + // Try to get available MPG regions from API + mpgRegionsResponse, err := s.uiexClient.ListMPGRegions(ctx, orgSlug) + if err != nil { + return nil, err + } + + return filterMPGRegions(platformRegions, mpgRegionsResponse.Data), nil +} + +// IsValidMPGRegion checks if a region code is valid for Managed Postgres +func IsValidMPGRegion(ctx context.Context, orgSlug string, regionCode string) (bool, error) { + service := NewMPGService(ctx) + return service.IsValidMPGRegion(ctx, orgSlug, regionCode) +} + +// IsValidMPGRegion checks if a region code is valid for Managed Postgres +func (s *MPGService) IsValidMPGRegion(ctx context.Context, orgSlug string, regionCode string) (bool, error) { + availableRegions, err := s.GetAvailableMPGRegions(ctx, orgSlug) + if err != nil { + return false, err + } + + for _, region := range availableRegions { + if region.Code == regionCode { + return true, nil + } + } + return false, nil +} + +// GetAvailableMPGRegionCodes returns just the region codes for error messages +func GetAvailableMPGRegionCodes(ctx context.Context, orgSlug string) ([]string, error) { + service := NewMPGService(ctx) + return service.GetAvailableMPGRegionCodes(ctx, orgSlug) +} + +// GetAvailableMPGRegionCodes returns just the region codes for error messages +func (s *MPGService) GetAvailableMPGRegionCodes(ctx context.Context, orgSlug string) ([]string, error) { + availableRegions, err := s.GetAvailableMPGRegions(ctx, orgSlug) + if err != nil { + return nil, err + } + + var codes []string + for _, region := range availableRegions { + codes = append(codes, region.Code) + } + return codes, nil +} + +// filterMPGRegions filters platform regions based on MPG availability +func filterMPGRegions(platformRegions []fly.Region, mpgRegions []uiex.MPGRegion) []fly.Region { + var filteredRegions []fly.Region + + for _, region := range platformRegions { + for _, allowed := range mpgRegions { + if region.Code == allowed.Code && allowed.Available { + filteredRegions = append(filteredRegions, region) + break + } + } + } + + return filteredRegions +} + +// AliasedOrganizationSlug resolves organization slug the aliased slug +// using GraphQL. 
+// +// Example: +// +// Input: "jon-phenow" +// Output: "personal" (if "jon-phenow" is an alias for "personal") +// +// GraphQL Query: +// +// query { +// organization(slug: "jon-phenow"){ +// slug +// } +// } +// +// Response: +// +// { +// "data": { +// "organization": { +// "slug": "personal" +// } +// } +// } +func AliasedOrganizationSlug(ctx context.Context, inputSlug string) (string, error) { + client := flyutil.ClientFromContext(ctx) + genqClient := client.GenqClient() + + // Query the GraphQL API to resolve the organization slug + resp, err := gql.GetOrganization(ctx, genqClient, inputSlug) + if err != nil { + return "", fmt.Errorf("failed to resolve organization slug %q: %w", inputSlug, err) + } + + // Return the canonical slug from the API response + return resp.Organization.Slug, nil +} + +// ResolveOrganizationSlug resolves organization slug aliases to the canonical slug +// using GraphQL. This handles cases where users use aliases that map to different +// canonical organization slugs. +// +// Example: +// +// Input: "personal" +// Output: "jon-phenow" (if "personal" is an alias for "jon-phenow") +// +// GraphQL Query: +// +// query { +// organization(slug: "personal"){ +// rawSlug +// } +// } +// +// Response: +// +// { +// "data": { +// "organization": { +// "rawSlug": "jon-phenow" +// } +// } +// } +func ResolveOrganizationSlug(ctx context.Context, inputSlug string) (string, error) { + client := flyutil.ClientFromContext(ctx) + genqClient := client.GenqClient() + + // Query the GraphQL API to resolve the organization slug + resp, err := gql.GetOrganization(ctx, genqClient, inputSlug) + if err != nil { + return "", fmt.Errorf("failed to resolve organization slug %q: %w", inputSlug, err) + } + + // Return the canonical slug from the API response + return resp.Organization.RawSlug, nil +} + +// detectTokenHasMacaroon determines if the current context has macaroon-style tokens. +// MPG commands require macaroon tokens to function properly. +func detectTokenHasMacaroon(ctx context.Context) bool { + tokens := config.Tokens(ctx) + if tokens == nil { + return false + } + + // Check for macaroon tokens (newer style) + return len(tokens.GetMacaroonTokens()) > 0 +} + +// validateMPGTokenCompatibility checks if the current authentication tokens are compatible +// with MPG commands. MPG requires macaroon-style tokens and cannot work with older bearer tokens. +// Returns an error if bearer tokens are detected, suggesting the user upgrade their tokens. +func validateMPGTokenCompatibility(ctx context.Context) error { + if !detectTokenHasMacaroon(ctx) { + return fmt.Errorf(`MPG commands require updated tokens but found older-style tokens. 
+
+Please upgrade your authentication by running:
+  flyctl auth logout
+  flyctl auth login
+`)
+	}
+	return nil
+}
diff --git a/internal/command/mpg/mpg_test.go b/internal/command/mpg/mpg_test.go
new file mode 100644
index 0000000000..9ed21de584
--- /dev/null
+++ b/internal/command/mpg/mpg_test.go
@@ -0,0 +1,932 @@
+package mpg
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	fly "github.com/superfly/fly-go"
+	"github.com/superfly/fly-go/tokens"
+	"github.com/superfly/flyctl/internal/command_context"
+	"github.com/superfly/flyctl/internal/config"
+	"github.com/superfly/flyctl/internal/flag/flagctx"
+	"github.com/superfly/flyctl/internal/uiex"
+	"github.com/superfly/flyctl/internal/uiexutil"
+	"github.com/superfly/flyctl/iostreams"
+)
+
+// MockUiexClient implements the uiexutil.Client interface for testing
+type MockUiexClient struct {
+	ListMPGRegionsFunc          func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error)
+	ListManagedClustersFunc     func(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error)
+	GetManagedClusterFunc       func(ctx context.Context, orgSlug string, id string) (uiex.GetManagedClusterResponse, error)
+	GetManagedClusterByIdFunc   func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error)
+	CreateUserFunc              func(ctx context.Context, id string, input uiex.CreateUserInput) (uiex.CreateUserResponse, error)
+	CreateClusterFunc           func(ctx context.Context, input uiex.CreateClusterInput) (uiex.CreateClusterResponse, error)
+	DestroyClusterFunc          func(ctx context.Context, orgSlug string, id string) error
+	CreateFlyManagedBuilderFunc func(ctx context.Context, orgSlug string, region string) (uiex.CreateFlyManagedBuilderResponse, error)
+}
+
+func (m *MockUiexClient) ListMPGRegions(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) {
+	if m.ListMPGRegionsFunc != nil {
+		return m.ListMPGRegionsFunc(ctx, orgSlug)
+	}
+	return uiex.ListMPGRegionsResponse{}, nil
+}
+
+func (m *MockUiexClient) ListManagedClusters(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) {
+	if m.ListManagedClustersFunc != nil {
+		return m.ListManagedClustersFunc(ctx, orgSlug)
+	}
+	return uiex.ListManagedClustersResponse{}, nil
+}
+
+func (m *MockUiexClient) GetManagedCluster(ctx context.Context, orgSlug string, id string) (uiex.GetManagedClusterResponse, error) {
+	if m.GetManagedClusterFunc != nil {
+		return m.GetManagedClusterFunc(ctx, orgSlug, id)
+	}
+	return uiex.GetManagedClusterResponse{}, nil
+}
+
+func (m *MockUiexClient) GetManagedClusterById(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) {
+	if m.GetManagedClusterByIdFunc != nil {
+		return m.GetManagedClusterByIdFunc(ctx, id)
+	}
+	return uiex.GetManagedClusterResponse{}, nil
+}
+
+func (m *MockUiexClient) CreateUser(ctx context.Context, id string, input uiex.CreateUserInput) (uiex.CreateUserResponse, error) {
+	if m.CreateUserFunc != nil {
+		return m.CreateUserFunc(ctx, id, input)
+	}
+	return uiex.CreateUserResponse{}, nil
+}
+
+func (m *MockUiexClient) CreateFlyManagedBuilder(ctx context.Context, orgSlug string, region string) (uiex.CreateFlyManagedBuilderResponse, error) {
+	// Guard on the builder-specific hook, not CreateUserFunc, so a nil hook is never invoked.
+	if m.CreateFlyManagedBuilderFunc != nil {
+		return m.CreateFlyManagedBuilderFunc(ctx, orgSlug, region)
+	}
+	return uiex.CreateFlyManagedBuilderResponse{}, nil
+}
+
+func (m *MockUiexClient) CreateCluster(ctx context.Context, input 
uiex.CreateClusterInput) (uiex.CreateClusterResponse, error) { + if m.CreateClusterFunc != nil { + return m.CreateClusterFunc(ctx, input) + } + return uiex.CreateClusterResponse{}, nil +} + +func (m *MockUiexClient) DestroyCluster(ctx context.Context, orgSlug string, id string) error { + if m.DestroyClusterFunc != nil { + return m.DestroyClusterFunc(ctx, orgSlug, id) + } + return nil +} + +// MockRegionProvider implements RegionProvider for testing +type MockRegionProvider struct { + GetPlatformRegionsFunc func(ctx context.Context) ([]fly.Region, error) +} + +func (m *MockRegionProvider) GetPlatformRegions(ctx context.Context) ([]fly.Region, error) { + if m.GetPlatformRegionsFunc != nil { + return m.GetPlatformRegionsFunc(ctx) + } + return []fly.Region{}, nil +} + +// setupTestContext creates a context with all necessary components for testing +func setupTestContext() context.Context { + ctx := context.Background() + + // Add iostreams + ios, _, _, _ := iostreams.Test() + ctx = iostreams.NewContext(ctx, ios) + + // Add command context with a mock command + cmd := &cobra.Command{} + ctx = command_context.NewContext(ctx, cmd) + + // Add flag context with a flag set + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.String("cluster", "", "Cluster ID") + flagSet.Bool("yes", false, "Auto-confirm") + flagSet.String("org", "", "Organization") + flagSet.Bool("json", false, "JSON output") + ctx = flagctx.NewContext(ctx, flagSet) + + return ctx +} + +// Test the actual filterMPGRegions function with real data +func TestFilterMPGRegions_RealFunctionality(t *testing.T) { + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + {Code: "ams", Name: "Amsterdam, Netherlands (EU)"}, + {Code: "nrt", Name: "Tokyo, Japan (AS)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + {Code: "ams", Available: false}, // Not available + // nrt not in MPG regions at all + } + + filtered := filterMPGRegions(platformRegions, mpgRegions) + + // Should only return ord and lax (available in MPG) + assert.Len(t, filtered, 2) + assert.Equal(t, "ord", filtered[0].Code) + assert.Equal(t, "lax", filtered[1].Code) + + // Verify the filtering logic works correctly + for _, region := range filtered { + found := false + for _, mpgRegion := range mpgRegions { + if region.Code == mpgRegion.Code && mpgRegion.Available { + found = true + break + } + } + assert.True(t, found, "Filtered region %s should be available in MPG", region.Code) + } +} + +// Test ClusterFromFlagOrSelect with actual flag context +func TestClusterFromFlagOrSelect_WithFlagContext(t *testing.T) { + ctx := setupTestContext() + + expectedCluster := uiex.ManagedCluster{ + Id: "test-cluster-123", + Name: "test-cluster", + Region: "ord", + Status: "ready", + Organization: fly.Organization{ + Slug: "test-org", + }, + } + + mockUiex := &MockUiexClient{ + ListManagedClustersFunc: func(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) { + assert.Equal(t, "test-org", orgSlug) + return uiex.ListManagedClustersResponse{ + Data: []uiex.ManagedCluster{expectedCluster}, + }, nil + }, + } + + ctx = uiexutil.NewContextWithClient(ctx, mockUiex) + + t.Run("no clusters found", func(t *testing.T) { + mockEmpty := &MockUiexClient{ + ListManagedClustersFunc: func(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) { + return uiex.ListManagedClustersResponse{Data: 
[]uiex.ManagedCluster{}}, nil + }, + } + ctx := uiexutil.NewContextWithClient(ctx, mockEmpty) + + _, err := ClusterFromFlagOrSelect(ctx, "test-org") + assert.Error(t, err) + assert.Contains(t, err.Error(), "no managed postgres clusters found") + }) + + t.Run("cluster not found by ID", func(t *testing.T) { + // Set the cluster flag + flagSet := flagctx.FromContext(ctx) + flagSet.Set("cluster", "wrong-cluster-id") + + _, err := ClusterFromFlagOrSelect(ctx, "test-org") + assert.Error(t, err) + assert.Contains(t, err.Error(), "managed postgres cluster \"wrong-cluster-id\" not found") + }) + + t.Run("cluster found by ID", func(t *testing.T) { + // Set the cluster flag to a valid ID + flagSet := flagctx.FromContext(ctx) + flagSet.Set("cluster", "test-cluster-123") + + cluster, err := ClusterFromFlagOrSelect(ctx, "test-org") + require.NoError(t, err) + assert.Equal(t, expectedCluster.Id, cluster.Id) + assert.Equal(t, expectedCluster.Name, cluster.Name) + }) +} + +// Test the actual GetAvailableMPGRegions function with mocked dependencies +func TestGetAvailableMPGRegions_RealFunction(t *testing.T) { + ctx := setupTestContext() + + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + {Code: "ams", Name: "Amsterdam, Netherlands (EU)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + {Code: "ams", Available: false}, // Not available + } + + mockUiex := &MockUiexClient{ + ListMPGRegionsFunc: func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + assert.Equal(t, "test-org", orgSlug) + return uiex.ListMPGRegionsResponse{ + Data: mpgRegions, + }, nil + }, + } + + mockRegionProvider := &MockRegionProvider{ + GetPlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, error) { + return platformRegions, nil + }, + } + + // Create service with mocked dependencies + service := NewMPGServiceWithDependencies(mockUiex, mockRegionProvider) + + // Test the actual function + regions, err := service.GetAvailableMPGRegions(ctx, "test-org") + require.NoError(t, err) + + // Should only return ord and lax (available), not ams (unavailable) + assert.Len(t, regions, 2) + assert.Equal(t, "ord", regions[0].Code) + assert.Equal(t, "lax", regions[1].Code) +} + +// Test the actual IsValidMPGRegion function +func TestIsValidMPGRegion_RealFunction(t *testing.T) { + ctx := setupTestContext() + + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + } + + mockUiex := &MockUiexClient{ + ListMPGRegionsFunc: func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + return uiex.ListMPGRegionsResponse{ + Data: mpgRegions, + }, nil + }, + } + + mockRegionProvider := &MockRegionProvider{ + GetPlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, error) { + return platformRegions, nil + }, + } + + // Create service with mocked dependencies + service := NewMPGServiceWithDependencies(mockUiex, mockRegionProvider) + + // Test valid region + valid, err := service.IsValidMPGRegion(ctx, "test-org", "ord") + require.NoError(t, err) + assert.True(t, valid, "Should find valid region 'ord'") + + // Test invalid region + valid, err = service.IsValidMPGRegion(ctx, "test-org", "invalid") + require.NoError(t, err) + assert.False(t, valid, "Should not 
find invalid region") +} + +// Test the actual GetAvailableMPGRegionCodes function +func TestGetAvailableMPGRegionCodes_RealFunction(t *testing.T) { + ctx := setupTestContext() + + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + } + + mockUiex := &MockUiexClient{ + ListMPGRegionsFunc: func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + return uiex.ListMPGRegionsResponse{ + Data: mpgRegions, + }, nil + }, + } + + mockRegionProvider := &MockRegionProvider{ + GetPlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, error) { + return platformRegions, nil + }, + } + + // Create service with mocked dependencies + service := NewMPGServiceWithDependencies(mockUiex, mockRegionProvider) + + // Test the actual function + codes, err := service.GetAvailableMPGRegionCodes(ctx, "test-org") + require.NoError(t, err) + + assert.Len(t, codes, 2) + assert.Contains(t, codes, "ord") + assert.Contains(t, codes, "lax") +} + +// Test the destroy command logic (extracted from runDestroy) +func TestDestroyCommand_Logic(t *testing.T) { + ctx := setupTestContext() + + clusterID := "test-cluster-123" + expectedCluster := uiex.ManagedCluster{ + Id: clusterID, + Name: "test-cluster", + Region: "ord", + Status: "ready", + Organization: fly.Organization{ + Slug: "test-org", + }, + } + + mockUiex := &MockUiexClient{ + GetManagedClusterByIdFunc: func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + assert.Equal(t, clusterID, id) + return uiex.GetManagedClusterResponse{ + Data: expectedCluster, + }, nil + }, + DestroyClusterFunc: func(ctx context.Context, orgSlug string, id string) error { + assert.Equal(t, "test-org", orgSlug) + assert.Equal(t, clusterID, id) + return nil + }, + } + + ctx = uiexutil.NewContextWithClient(ctx, mockUiex) + + // Test successful cluster retrieval + response, err := mockUiex.GetManagedClusterById(ctx, clusterID) + require.NoError(t, err) + assert.Equal(t, expectedCluster.Id, response.Data.Id) + assert.Equal(t, expectedCluster.Name, response.Data.Name) + + // Test organization validation + if response.Data.Organization.Slug != "test-org" { + t.Error("Organization validation failed") + } + + // Test successful cluster destruction + err = mockUiex.DestroyCluster(ctx, "test-org", clusterID) + require.NoError(t, err) +} + +// Test the status command logic (extracted from runStatus) +func TestStatusCommand_Logic(t *testing.T) { + ctx := setupTestContext() + + clusterID := "test-cluster-123" + expectedCluster := uiex.ManagedCluster{ + Id: clusterID, + Name: "test-cluster", + Region: "ord", + Status: "ready", + Plan: "development", + Disk: 10, + Replicas: 1, + Organization: fly.Organization{ + Slug: "test-org", + }, + IpAssignments: uiex.ManagedClusterIpAssignments{ + Direct: "10.0.0.1", + }, + } + + mockUiex := &MockUiexClient{ + GetManagedClusterByIdFunc: func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + assert.Equal(t, clusterID, id) + return uiex.GetManagedClusterResponse{ + Data: expectedCluster, + }, nil + }, + } + + ctx = uiexutil.NewContextWithClient(ctx, mockUiex) + + // Test successful cluster details retrieval + clusterDetails, err := mockUiex.GetManagedClusterById(ctx, clusterID) + require.NoError(t, err) + assert.Equal(t, expectedCluster.Id, clusterDetails.Data.Id) + assert.Equal(t, 
expectedCluster.Name, clusterDetails.Data.Name) + assert.Equal(t, expectedCluster.Region, clusterDetails.Data.Region) + assert.Equal(t, expectedCluster.Status, clusterDetails.Data.Status) + assert.Equal(t, expectedCluster.Disk, clusterDetails.Data.Disk) + assert.Equal(t, expectedCluster.Replicas, clusterDetails.Data.Replicas) + assert.Equal(t, expectedCluster.IpAssignments.Direct, clusterDetails.Data.IpAssignments.Direct) +} + +// Test the list command logic (extracted from runList) +func TestListCommand_Logic(t *testing.T) { + ctx := setupTestContext() + + expectedClusters := []uiex.ManagedCluster{ + { + Id: "cluster-1", + Name: "test-cluster-1", + Region: "ord", + Status: "ready", + Plan: "development", + Organization: fly.Organization{ + Slug: "test-org", + }, + }, + { + Id: "cluster-2", + Name: "test-cluster-2", + Region: "lax", + Status: "ready", + Plan: "production", + Organization: fly.Organization{ + Slug: "test-org", + }, + }, + } + + mockUiex := &MockUiexClient{ + ListManagedClustersFunc: func(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) { + assert.Equal(t, "test-org", orgSlug) + return uiex.ListManagedClustersResponse{ + Data: expectedClusters, + }, nil + }, + } + + ctx = uiexutil.NewContextWithClient(ctx, mockUiex) + + // Test successful cluster listing + clusters, err := mockUiex.ListManagedClusters(ctx, "test-org") + require.NoError(t, err) + assert.Len(t, clusters.Data, 2) + + // Verify cluster data + assert.Equal(t, expectedClusters[0].Id, clusters.Data[0].Id) + assert.Equal(t, expectedClusters[0].Name, clusters.Data[0].Name) + assert.Equal(t, expectedClusters[0].Region, clusters.Data[0].Region) + assert.Equal(t, expectedClusters[0].Status, clusters.Data[0].Status) + assert.Equal(t, expectedClusters[0].Plan, clusters.Data[0].Plan) + + assert.Equal(t, expectedClusters[1].Id, clusters.Data[1].Id) + assert.Equal(t, expectedClusters[1].Name, clusters.Data[1].Name) + assert.Equal(t, expectedClusters[1].Region, clusters.Data[1].Region) + assert.Equal(t, expectedClusters[1].Status, clusters.Data[1].Status) + assert.Equal(t, expectedClusters[1].Plan, clusters.Data[1].Plan) +} + +// Test error handling in API calls +func TestErrorHandling(t *testing.T) { + ctx := setupTestContext() + + t.Run("ListManagedClusters error", func(t *testing.T) { + mockUiex := &MockUiexClient{ + ListManagedClustersFunc: func(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) { + return uiex.ListManagedClustersResponse{}, fmt.Errorf("API error") + }, + } + ctx := uiexutil.NewContextWithClient(ctx, mockUiex) + + _, err := ClusterFromFlagOrSelect(ctx, "test-org") + assert.Error(t, err) + assert.Contains(t, err.Error(), "failed retrieving postgres clusters") + }) + + t.Run("GetManagedClusterById error", func(t *testing.T) { + mockUiex := &MockUiexClient{ + GetManagedClusterByIdFunc: func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + return uiex.GetManagedClusterResponse{}, fmt.Errorf("API error") + }, + } + ctx := uiexutil.NewContextWithClient(ctx, mockUiex) + + _, err := mockUiex.GetManagedClusterById(ctx, "test-cluster") + assert.Error(t, err) + assert.Contains(t, err.Error(), "API error") + }) + + t.Run("DestroyCluster error", func(t *testing.T) { + mockUiex := &MockUiexClient{ + DestroyClusterFunc: func(ctx context.Context, orgSlug string, id string) error { + return fmt.Errorf("destroy failed") + }, + } + ctx := uiexutil.NewContextWithClient(ctx, mockUiex) + + err := mockUiex.DestroyCluster(ctx, "test-org", 
"test-cluster") + assert.Error(t, err) + assert.Contains(t, err.Error(), "destroy failed") + }) +} + +// Test the create command logic (extracted from runCreate) +func TestCreateCommand_Logic(t *testing.T) { + ctx := setupTestContext() + + expectedCluster := uiex.ManagedCluster{ + Id: "new-cluster-123", + Name: "test-db", + Region: "ord", + Status: "ready", + Organization: fly.Organization{ + Slug: "test-org", + }, + } + + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + } + + mockUiex := &MockUiexClient{ + ListMPGRegionsFunc: func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + return uiex.ListMPGRegionsResponse{ + Data: mpgRegions, + }, nil + }, + CreateClusterFunc: func(ctx context.Context, input uiex.CreateClusterInput) (uiex.CreateClusterResponse, error) { + // Verify the input parameters + assert.Equal(t, "test-db", input.Name) + assert.Equal(t, "ord", input.Region) + assert.Equal(t, "basic", input.Plan) + assert.Equal(t, "test-org", input.OrgSlug) + + return uiex.CreateClusterResponse{ + Data: struct { + Id string `json:"id"` + Name string `json:"name"` + Status *string `json:"status"` + Plan string `json:"plan"` + Environment *string `json:"environment"` + Region string `json:"region"` + Organization fly.Organization `json:"organization"` + Replicas int `json:"replicas"` + Disk int `json:"disk"` + IpAssignments uiex.ManagedClusterIpAssignments `json:"ip_assignments"` + PostGISEnabled bool `json:"postgis_enabled"` + }{ + Id: expectedCluster.Id, + Name: expectedCluster.Name, + Region: expectedCluster.Region, + Plan: expectedCluster.Plan, + Organization: expectedCluster.Organization, + PostGISEnabled: false, + }, + }, nil + }, + GetManagedClusterByIdFunc: func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + assert.Equal(t, "new-cluster-123", id) + return uiex.GetManagedClusterResponse{ + Data: expectedCluster, + }, nil + }, + } + + mockRegionProvider := &MockRegionProvider{ + GetPlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, error) { + return platformRegions, nil + }, + } + + // Create service with mocked dependencies + service := NewMPGServiceWithDependencies(mockUiex, mockRegionProvider) + + // Test region validation logic using the actual function + availableRegions, err := service.GetAvailableMPGRegions(ctx, "test-org") + require.NoError(t, err) + assert.Len(t, availableRegions, 2, "Should have 2 available regions") + + // Test region selection logic + regionCode := "ord" + var selectedRegion *fly.Region + for _, region := range availableRegions { + if region.Code == regionCode { + selectedRegion = ®ion + break + } + } + require.NotNil(t, selectedRegion, "Should find selected region") + assert.Equal(t, "ord", selectedRegion.Code) + + // Test cluster creation + input := uiex.CreateClusterInput{ + Name: "test-db", + Region: selectedRegion.Code, + Plan: "basic", + OrgSlug: "test-org", + } + + response, err := mockUiex.CreateCluster(ctx, input) + require.NoError(t, err) + assert.Equal(t, expectedCluster.Id, response.Data.Id) + assert.Equal(t, expectedCluster.Name, response.Data.Name) + + // Test cluster status checking + cluster, err := mockUiex.GetManagedClusterById(ctx, response.Data.Id) + require.NoError(t, err) + assert.Equal(t, expectedCluster.Status, cluster.Data.Status) +} + +// Test the attach command logic 
(extracted from runAttach) +func TestAttachCommand_Logic(t *testing.T) { + ctx := setupTestContext() + + clusterID := "test-cluster-123" + + expectedCluster := uiex.ManagedCluster{ + Id: clusterID, + Name: "test-cluster", + Region: "ord", + Status: "ready", + Organization: fly.Organization{ + Slug: "test-org", + }, + } + + expectedApp := &fly.AppCompact{ + Organization: &fly.OrganizationBasic{ + Slug: "test-org", + }, + } + + connectionURI := "postgresql://user:pass@host:5432/db" + + mockUiex := &MockUiexClient{ + GetManagedClusterByIdFunc: func(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) { + assert.Equal(t, clusterID, id) + return uiex.GetManagedClusterResponse{ + Data: expectedCluster, + Credentials: uiex.GetManagedClusterCredentialsResponse{ + ConnectionUri: connectionURI, + }, + }, nil + }, + } + + ctx = uiexutil.NewContextWithClient(ctx, mockUiex) + + // Test cluster retrieval + response, err := mockUiex.GetManagedClusterById(ctx, clusterID) + require.NoError(t, err) + assert.Equal(t, expectedCluster.Id, response.Data.Id) + assert.Equal(t, expectedCluster.Organization.Slug, response.Data.Organization.Slug) + assert.Equal(t, connectionURI, response.Credentials.ConnectionUri) + + // Test organization validation logic + clusterOrgSlug := response.Data.Organization.Slug + appOrgSlug := expectedApp.Organization.Slug + + // Test same organization - should pass + if appOrgSlug != clusterOrgSlug { + t.Error("Organization validation should pass for same organization") + } + + // Test organization validation failure + differentApp := &fly.AppCompact{ + Organization: &fly.OrganizationBasic{ + Slug: "different-org", + }, + } + + if differentApp.Organization.Slug == clusterOrgSlug { + t.Error("Organization validation should fail for different organizations") + } + + // Test secret validation logic + existingSecrets := []fly.Secret{ + {Name: "EXISTING_SECRET"}, + {Name: "ANOTHER_SECRET"}, + } + + variableName := "DATABASE_URL" + + // Test secret doesn't exist - should pass + secretExists := false + for _, secret := range existingSecrets { + if secret.Name == variableName { + secretExists = true + break + } + } + assert.False(t, secretExists, "Secret should not exist") + + // Test secret already exists - should fail + existingSecrets = append(existingSecrets, fly.Secret{Name: variableName}) + secretExists = false + for _, secret := range existingSecrets { + if secret.Name == variableName { + secretExists = true + break + } + } + assert.True(t, secretExists, "Secret should exist") +} + +// Test region validation in create command +func TestCreateCommand_RegionValidation(t *testing.T) { + ctx := setupTestContext() + + platformRegions := []fly.Region{ + {Code: "ord", Name: "Chicago, Illinois (US)"}, + {Code: "lax", Name: "Los Angeles, California (US)"}, + } + + mpgRegions := []uiex.MPGRegion{ + {Code: "ord", Available: true}, + {Code: "lax", Available: true}, + } + + mockUiex := &MockUiexClient{ + ListMPGRegionsFunc: func(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) { + return uiex.ListMPGRegionsResponse{ + Data: mpgRegions, + }, nil + }, + } + + mockRegionProvider := &MockRegionProvider{ + GetPlatformRegionsFunc: func(ctx context.Context) ([]fly.Region, error) { + return platformRegions, nil + }, + } + + // Create service with mocked dependencies + service := NewMPGServiceWithDependencies(mockUiex, mockRegionProvider) + + // Test valid region using the actual function + valid, err := service.IsValidMPGRegion(ctx, "test-org", "ord") + 
require.NoError(t, err) + assert.True(t, valid, "Should find valid region") + + // Test invalid region using the actual function + valid, err = service.IsValidMPGRegion(ctx, "test-org", "invalid") + require.NoError(t, err) + assert.False(t, valid, "Should not find invalid region") +} + +// Test actual MPG token validation functions +func TestMPGTokenValidation(t *testing.T) { + t.Run("detectTokenHasMacaroon with actual contexts", func(t *testing.T) { + // Test case 1: Context with no config (should handle gracefully) + emptyCtx := context.Background() + // This should panic or return false - let's catch the panic + func() { + defer func() { + if r := recover(); r != nil { + // Expected panic due to no config in context + t.Logf("Expected panic caught: %v", r) + } + }() + result := detectTokenHasMacaroon(emptyCtx) + // If we get here without panicking, it should return false + assert.False(t, result, "Should return false when config is nil") + }() + + // Test case 2: Context with nil tokens + configWithNilTokens := &config.Config{ + Tokens: nil, + } + ctxWithNilTokens := config.NewContext(context.Background(), configWithNilTokens) + result := detectTokenHasMacaroon(ctxWithNilTokens) + assert.False(t, result, "Should return false when tokens are nil") + + // Test case 3: Context with empty tokens (no macaroons) + emptyTokens := tokens.Parse("") // Parse empty string creates empty tokens + configWithEmptyTokens := &config.Config{ + Tokens: emptyTokens, + } + ctxWithEmptyTokens := config.NewContext(context.Background(), configWithEmptyTokens) + result = detectTokenHasMacaroon(ctxWithEmptyTokens) + assert.False(t, result, "Should return false when no macaroon tokens exist") + + // Test case 4: Context with bearer tokens only (no macaroons) + bearerTokens := tokens.Parse("some_bearer_token_here") // This won't be recognized as macaroon + configWithBearerTokens := &config.Config{ + Tokens: bearerTokens, + } + ctxWithBearerTokens := config.NewContext(context.Background(), configWithBearerTokens) + result = detectTokenHasMacaroon(ctxWithBearerTokens) + assert.False(t, result, "Should return false when only bearer tokens exist") + + // Test case 5: Context with macaroon tokens + macaroonTokens := tokens.Parse("fm1r_test_macaroon_token,fm2_another_macaroon") // fm1r and fm2 prefixes are macaroon tokens + configWithMacaroonTokens := &config.Config{ + Tokens: macaroonTokens, + } + ctxWithMacaroonTokens := config.NewContext(context.Background(), configWithMacaroonTokens) + result = detectTokenHasMacaroon(ctxWithMacaroonTokens) + assert.True(t, result, "Should return true when macaroon tokens exist") + + // Test case 6: Context with mixed tokens (including macaroons) + mixedTokens := tokens.Parse("bearer_token,fm1a_macaroon_token,oauth_token") + configWithMixedTokens := &config.Config{ + Tokens: mixedTokens, + } + ctxWithMixedTokens := config.NewContext(context.Background(), configWithMixedTokens) + result = detectTokenHasMacaroon(ctxWithMixedTokens) + assert.True(t, result, "Should return true when macaroon tokens exist among mixed tokens") + }) + + t.Run("validateMPGTokenCompatibility with actual contexts", func(t *testing.T) { + // Test case 1: Context with nil tokens - should fail + configWithNilTokens := &config.Config{ + Tokens: nil, + } + ctxWithNilTokens := config.NewContext(context.Background(), configWithNilTokens) + err := validateMPGTokenCompatibility(ctxWithNilTokens) + assert.Error(t, err, "Should return error when no macaroon tokens") + assert.Contains(t, err.Error(), "MPG commands require 
updated tokens") + assert.Contains(t, err.Error(), "flyctl auth logout") + assert.Contains(t, err.Error(), "flyctl auth login") + + // Test case 2: Context with empty tokens - should fail + emptyTokens := tokens.Parse("") + configWithEmptyTokens := &config.Config{ + Tokens: emptyTokens, + } + ctxWithEmptyTokens := config.NewContext(context.Background(), configWithEmptyTokens) + err = validateMPGTokenCompatibility(ctxWithEmptyTokens) + assert.Error(t, err, "Should return error when no macaroon tokens") + assert.Contains(t, err.Error(), "MPG commands require updated tokens") + + // Test case 3: Context with bearer tokens only - should fail + bearerTokens := tokens.Parse("some_bearer_token") + configWithBearerTokens := &config.Config{ + Tokens: bearerTokens, + } + ctxWithBearerTokens := config.NewContext(context.Background(), configWithBearerTokens) + err = validateMPGTokenCompatibility(ctxWithBearerTokens) + assert.Error(t, err, "Should return error when no macaroon tokens") + assert.Contains(t, err.Error(), "MPG commands require updated tokens") + + // Test case 4: Context with macaroon tokens - should pass + macaroonTokens := tokens.Parse("fm1r_test_macaroon_token") + configWithMacaroonTokens := &config.Config{ + Tokens: macaroonTokens, + } + ctxWithMacaroonTokens := config.NewContext(context.Background(), configWithMacaroonTokens) + err = validateMPGTokenCompatibility(ctxWithMacaroonTokens) + assert.NoError(t, err, "Should not return error when macaroon tokens exist") + + // Test case 5: Context with mixed tokens including macaroons - should pass + mixedTokens := tokens.Parse("bearer_token,fm1a_macaroon_token,oauth_token") + configWithMixedTokens := &config.Config{ + Tokens: mixedTokens, + } + ctxWithMixedTokens := config.NewContext(context.Background(), configWithMixedTokens) + err = validateMPGTokenCompatibility(ctxWithMixedTokens) + assert.NoError(t, err, "Should not return error when macaroon tokens exist among mixed tokens") + }) + + t.Run("MPG commands reject non-macaroon tokens", func(t *testing.T) { + // This test verifies that actual MPG command functions call the validation + // and properly reject contexts without macaroon tokens + + // Create a context with bearer tokens only (no macaroons) + bearerTokens := tokens.Parse("some_bearer_token") + configWithBearerTokens := &config.Config{ + Tokens: bearerTokens, + } + ctxWithBearerTokens := config.NewContext(context.Background(), configWithBearerTokens) + + // Test that the actual run functions would reject this context + // We can't easily test the full run functions due to their dependencies, + // but we can verify the validation function they call would fail + err := validateMPGTokenCompatibility(ctxWithBearerTokens) + assert.Error(t, err, "MPG commands should reject contexts with only bearer tokens") + assert.Contains(t, err.Error(), "MPG commands require updated tokens") + + // Create a context with macaroon tokens + macaroonTokens := tokens.Parse("fm1r_macaroon_token") + configWithMacaroonTokens := &config.Config{ + Tokens: macaroonTokens, + } + ctxWithMacaroonTokens := config.NewContext(context.Background(), configWithMacaroonTokens) + + // Test that the validation would pass for macaroon tokens + err = validateMPGTokenCompatibility(ctxWithMacaroonTokens) + assert.NoError(t, err, "MPG commands should accept contexts with macaroon tokens") + }) +} diff --git a/internal/command/mpg/plans.go b/internal/command/mpg/plans.go new file mode 100644 index 0000000000..b8393c2009 --- /dev/null +++ b/internal/command/mpg/plans.go @@ 
-0,0 +1,36 @@ +package mpg + +// PlanDetails holds the details for each managed postgres plan. +type PlanDetails struct { + Name string + CPU string + Memory string + PricePerMo int +} + +var MPGPlans = map[string]PlanDetails{ + "basic": { + Name: "Basic", + CPU: "Shared x 2", + Memory: "1 GB", + PricePerMo: 38, + }, + "starter": { + Name: "Starter", + CPU: "Shared x 2", + Memory: "2 GB", + PricePerMo: 72, + }, + "launch": { + Name: "Launch", + CPU: "Performance x 2", + Memory: "8 GB", + PricePerMo: 282, + }, + "scale": { + Name: "Scale", + CPU: "Performance x 4", + Memory: "33 GB", + PricePerMo: 962, + }, +} diff --git a/internal/command/mpg/proxy.go b/internal/command/mpg/proxy.go new file mode 100644 index 0000000000..4cec88c7df --- /dev/null +++ b/internal/command/mpg/proxy.go @@ -0,0 +1,146 @@ +package mpg + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/agent" + "github.com/superfly/flyctl/gql" + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/command/orgs" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flag/flagnames" + "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/uiex" + "github.com/superfly/flyctl/internal/uiexutil" + "github.com/superfly/flyctl/proxy" +) + +func newProxy() (cmd *cobra.Command) { + const ( + long = `Proxy to a MPG database` + + short = long + usage = "proxy" + ) + + cmd = command.New(usage, short, long, runProxy, command.RequireSession, command.RequireUiex) + + flag.Add(cmd, + flag.Region(), + flag.MPGCluster(), + + flag.String{ + Name: flagnames.BindAddr, + Shorthand: "b", + Default: "127.0.0.1", + Description: "Local address to bind to", + }, + ) + + return cmd +} + +func runProxy(ctx context.Context) (err error) { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + localProxyPort := "16380" + _, params, _, err := getMpgProxyParams(ctx, localProxyPort) + if err != nil { + return err + } + + return proxy.Connect(ctx, params) +} + +func getMpgProxyParams(ctx context.Context, localProxyPort string) (*uiex.ManagedCluster, *proxy.ConnectParams, *uiex.GetManagedClusterCredentialsResponse, error) { + client := flyutil.ClientFromContext(ctx) + uiexClient := uiexutil.ClientFromContext(ctx) + + // Get cluster ID from flag - it's optional now + clusterID := flag.GetMPGClusterID(ctx) + + var cluster *uiex.ManagedCluster + var orgSlug string + var err error + + if clusterID != "" { + // If cluster ID is provided, get cluster details directly and extract org info from it + response, err := uiexClient.GetManagedClusterById(ctx, clusterID) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed retrieving cluster %s: %w", clusterID, err) + } + cluster = &response.Data + orgSlug = cluster.Organization.Slug + } else { + // If no cluster ID is provided, let user select org first, then cluster + org, err := orgs.OrgFromFlagOrSelect(ctx) + if err != nil { + return nil, nil, nil, err + } + + // For ui-ex requests we need the real org slug (resolve aliases like "personal") + genqClient := client.GenqClient() + var fullOrg *gql.GetOrganizationResponse + if fullOrg, err = gql.GetOrganization(ctx, genqClient, org.Slug); err != nil { + return nil, nil, nil, fmt.Errorf("failed fetching org: %w", err) + } + + // Now let user select a cluster from this organization + selectedCluster, err := ClusterFromFlagOrSelect(ctx, fullOrg.Organization.RawSlug) + if err != nil { + 
return nil, nil, nil, err + } + + cluster = selectedCluster + orgSlug = cluster.Organization.Slug + } + + // At this point we have both cluster and orgSlug + // Get credentials for the cluster + response, err := uiexClient.GetManagedClusterById(ctx, cluster.Id) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed retrieving cluster credentials %s: %w", cluster.Id, err) + } + + // Resolve organization slug to handle aliases + resolvedOrgSlug, err := AliasedOrganizationSlug(ctx, orgSlug) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to resolve organization slug: %w", err) + } + + if response.Credentials.Status == "initializing" { + return nil, nil, nil, fmt.Errorf("cluster is still initializing, wait a bit more") + } + + if response.Credentials.Status == "error" || response.Credentials.Password == "" { + return nil, nil, nil, fmt.Errorf("error getting cluster password") + } + + if cluster.IpAssignments.Direct == "" { + return nil, nil, nil, fmt.Errorf("error getting cluster IP") + } + + agentclient, err := agent.Establish(ctx, client) + if err != nil { + return nil, nil, nil, err + } + + // Use the resolved organization slug for wireguard tunnel + dialer, err := agentclient.ConnectToTunnel(ctx, resolvedOrgSlug, "", false) + if err != nil { + return nil, nil, nil, err + } + + return cluster, &proxy.ConnectParams{ + Ports: []string{localProxyPort, "5432"}, + OrganizationSlug: resolvedOrgSlug, + Dialer: dialer, + BindAddr: flag.GetBindAddr(ctx), + RemoteHost: cluster.IpAssignments.Direct, + }, &response.Credentials, nil +} diff --git a/internal/command/mpg/status.go b/internal/command/mpg/status.go new file mode 100644 index 0000000000..d23a1fdb8f --- /dev/null +++ b/internal/command/mpg/status.go @@ -0,0 +1,86 @@ +package mpg + +import ( + "context" + "fmt" + "strconv" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/iostreams" + + "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/internal/uiexutil" +) + +func newStatus() *cobra.Command { + const ( + long = `Show status and details of a specific Managed Postgres cluster using its ID.` + short = "Show MPG cluster status." 
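+		// usage documents the required positional CLUSTER_ID argument, enforced by cobra.ExactArgs(1) below.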
+ usage = "status [CLUSTER_ID]" + ) + + cmd := command.New(usage, short, long, runStatus, + command.RequireSession, + command.RequireUiex, + ) + + cmd.Args = cobra.ExactArgs(1) + + flag.Add(cmd, flag.JSONOutput()) + + return cmd +} + +func runStatus(ctx context.Context) error { + // Check token compatibility early + if err := validateMPGTokenCompatibility(ctx); err != nil { + return err + } + + cfg := config.FromContext(ctx) + out := iostreams.FromContext(ctx).Out + uiexClient := uiexutil.ClientFromContext(ctx) + + clusterID := flag.FirstArg(ctx) + if clusterID == "" { + // Should not happen due to cobra.ExactArgs(1), but good practice + return fmt.Errorf("cluster ID argument is required") + } + + // Fetch detailed cluster information by ID + clusterDetails, err := uiexClient.GetManagedClusterById(ctx, clusterID) + if err != nil { + return fmt.Errorf("failed retrieving details for cluster %s: %w", clusterID, err) + } + + if cfg.JSONOutput { + return render.JSON(out, clusterDetails) + } + + rows := [][]string{{ + clusterDetails.Data.Id, + clusterDetails.Data.Name, + clusterDetails.Data.Organization.Slug, + clusterDetails.Data.Region, + clusterDetails.Data.Status, + strconv.Itoa(clusterDetails.Data.Disk), + strconv.Itoa(clusterDetails.Data.Replicas), + clusterDetails.Data.IpAssignments.Direct, + }} + + cols := []string{ + "ID", + "Name", + "Organization", + "Region", + "Status", + "Allocated Disk (GB)", + "Replicas", + "Direct IP", + } + + return render.VerticalTable(out, "Cluster Status", rows, cols...) +} diff --git a/internal/command/orgs/create.go b/internal/command/orgs/create.go index 935ce5c0fa..7540bafafe 100644 --- a/internal/command/orgs/create.go +++ b/internal/command/orgs/create.go @@ -51,9 +51,7 @@ func runCreate(ctx context.Context) error { return err } - var name string - - name = flag.FirstArg(ctx) + var name = flag.FirstArg(ctx) if user.EnablePaidHobby { fmt.Fprintf(io.Out, "New organizations start on the Pay As You Go plan.\n\n") diff --git a/internal/command/platform/regions.go b/internal/command/platform/regions.go index cc5d8ed02c..ea0622afca 100644 --- a/internal/command/platform/regions.go +++ b/internal/command/platform/regions.go @@ -1,34 +1,38 @@ package platform import ( + "cmp" "context" "fmt" + "slices" "sort" + "github.com/samber/lo" "github.com/spf13/cobra" - "golang.org/x/exp/slices" - - "github.com/superfly/flyctl/iostreams" - + "github.com/superfly/fly-go" + "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/render" + "github.com/superfly/flyctl/iostreams" ) // Hardcoded list of regions with GPUs // TODO: fetch this list from the graphql endpoint once it is there var gpuRegions = []string{"iad", "sjc", "syd", "ams"} +const RegionsCommandDesc = `View a list of regions where Fly has datacenters. +'Capacity' shows how many performance-1x VMs can currently be launched in each region. 
+` + func newRegions() (cmd *cobra.Command) { const ( - long = `View a list of regions where Fly has edges and/or datacenters -` short = "List regions" ) - cmd = command.New("regions", short, long, runRegions, + cmd = command.New("regions", short, RegionsCommandDesc, runRegions, command.RequireSession, ) @@ -38,9 +42,11 @@ func newRegions() (cmd *cobra.Command) { } func runRegions(ctx context.Context) error { - client := flyutil.ClientFromContext(ctx) - - regions, _, err := client.PlatformRegions(ctx) + flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{}) + if err != nil { + return err + } + regions, err := flapsClient.GetRegions(ctx, "") if err != nil { return fmt.Errorf("failed retrieving regions: %w", err) } @@ -48,34 +54,47 @@ func runRegions(ctx context.Context) error { return regions[i].Name < regions[j].Name }) - out := iostreams.FromContext(ctx).Out + io := iostreams.FromContext(ctx) + out := io.Out if config.FromContext(ctx).JSONOutput { return render.JSON(out, regions) } var rows [][]string - for _, region := range regions { - gateway := "" - if region.GatewayAvailable { - gateway = "✓" - } - paidPlan := "" - if region.RequiresPaidPlan { - paidPlan = "✓" - } - gpuAvailable := "" - if slices.Contains(gpuRegions, region.Code) { - gpuAvailable = "✓" - } + regionGroups := lo.GroupBy(regions, func(item fly.Region) fly.GeoRegion { return item.GeoRegion }) + keys := lo.Keys(regionGroups) + slices.SortFunc(keys, func(a, b fly.GeoRegion) int { return cmp.Compare(a, b) }) + for _, key := range keys { + regionGroup := regionGroups[key] + rows = append(rows, []string{""}) + rows = append(rows, []string{io.ColorScheme().Underline(key.String())}) + for _, region := range regionGroup { + gateway := "" + if region.GatewayAvailable { + gateway = "✓" + } + paidPlan := "" + if region.RequiresPaidPlan { + paidPlan = "✓" + } + gpuAvailable := "" + if slices.Contains(gpuRegions, region.Code) { + gpuAvailable = "✓" + } + + capacity := fmt.Sprint(region.Capacity) + capacity = io.ColorScheme().RedGreenGradient(capacity, float64(region.Capacity)/1000) - rows = append(rows, []string{ - region.Name, - region.Code, - gateway, - paidPlan, - gpuAvailable, - }) + rows = append(rows, []string{ + region.Name, + region.Code, + gateway, + gpuAvailable, + capacity, + paidPlan, + }) + } } - return render.Table(out, "", rows, "Name", "Code", "Gateway", "Launch Plan + Only", "GPUs") + return render.Table(out, "", rows, "Name", "Code", "Gateway", "GPUs", "Capacity", "Launch Plan+") } diff --git a/internal/command/platform/vmsizes.go b/internal/command/platform/vmsizes.go index 0922ea9f0c..a60ab8be09 100644 --- a/internal/command/platform/vmsizes.go +++ b/internal/command/platform/vmsizes.go @@ -10,6 +10,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/render" "github.com/superfly/flyctl/iostreams" @@ -33,6 +34,7 @@ func newVMSizes() (cmd *cobra.Command) { } func runMachineVMSizes(ctx context.Context) error { + cfg := config.FromContext(ctx) out := iostreams.FromContext(ctx).Out type preset struct { @@ -63,27 +65,35 @@ func runMachineVMSizes(ctx context.Context) error { } }) - // Filter and display shared cpu sizes. 
- shared := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { - return p.strings, p.guest.CPUKind == "shared" && p.guest.GPUKind == "" - }) - if err := render.Table(out, "Machines platform", shared, "Name", "CPU Cores", "Memory"); err != nil { - return err - } + if cfg.JSONOutput { + vmSizes := make(map[string]*fly.MachineGuest, len(sortedPresets)) + for _, preset := range sortedPresets { + vmSizes[preset.strings[0]] = preset.guest + } + return render.JSON(out, vmSizes) + } else { + // Filter and display shared cpu sizes. + shared := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { + return p.strings, p.guest.CPUKind == "shared" && p.guest.GPUKind == "" + }) + if err := render.Table(out, "Machines platform", shared, "Name", "CPU Cores", "Memory"); err != nil { + return err + } - // Filter and display performance cpu sizes. - performance := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { - return p.strings, p.guest.CPUKind == "performance" && p.guest.GPUKind == "" - }) - if err := render.Table(out, "", performance, "Name", "CPU Cores", "Memory"); err != nil { - return err - } + // Filter and display performance cpu sizes. + performance := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { + return p.strings, p.guest.CPUKind == "performance" && p.guest.GPUKind == "" + }) + if err := render.Table(out, "", performance, "Name", "CPU Cores", "Memory"); err != nil { + return err + } - // Filter and display gpu sizes. - gpus := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { - return p.strings, p.guest.GPUKind != "" - }) - return render.Table(out, "", gpus, "Name", "CPU Cores", "Memory", "GPU model") + // Filter and display gpu sizes. + gpus := lo.FilterMap(sortedPresets, func(p preset, _ int) ([]string, bool) { + return p.strings, p.guest.GPUKind != "" + }) + return render.Table(out, "", gpus, "Name", "CPU Cores", "Memory", "GPU model") + } } func cores(cores int) string { diff --git a/internal/command/postgres/add_flycast.go b/internal/command/postgres/add_flycast.go index 7d7af1a0a3..c9817ab228 100644 --- a/internal/command/postgres/add_flycast.go +++ b/internal/command/postgres/add_flycast.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" "github.com/superfly/flyctl/internal/flag" @@ -72,8 +73,8 @@ func doAddFlycast(ctx context.Context) error { return fmt.Errorf("machines could not be retrieved %w", err) } - var bouncerPort int = 5432 - var pgPort int = 5433 + var bouncerPort = 5432 + var pgPort = 5433 for _, machine := range machines { for _, service := range machine.Config.Services { if service.InternalPort == 5432 || service.InternalPort == 5433 { @@ -124,8 +125,15 @@ func doAddFlycast(ctx context.Context) error { }, } + appName := appconfig.NameFromContext(ctx) + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return err + } + err = mach.Update(ctx, machine, &fly.LaunchMachineInput{ - Config: conf, + Config: conf, + MinSecretsVersion: minvers, }) if err != nil { return err diff --git a/internal/command/postgres/attach.go b/internal/command/postgres/attach.go index 657b2af666..c8e4324a4b 100644 --- a/internal/command/postgres/attach.go +++ b/internal/command/postgres/attach.go @@ -11,9 +11,11 @@ import ( "github.com/superfly/flyctl/flypg" 
"github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" mach "github.com/superfly/flyctl/internal/machine" "github.com/superfly/flyctl/internal/prompt" @@ -211,6 +213,11 @@ func runAttachCluster(ctx context.Context, leaderIP string, params AttachParams, superuser = params.SuperUser ) + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) + if err != nil { + return err + } + if dbName == "" { dbName = appName } @@ -240,7 +247,7 @@ func runAttachCluster(ctx context.Context, leaderIP string, params AttachParams, fmt.Fprintln(io.Out, "Checking for existing attachments") - secrets, err := client.GetAppSecrets(ctx, input.AppID) + secrets, err := appsecrets.List(ctx, flapsClient, appName) if err != nil { return err } @@ -323,7 +330,7 @@ func runAttachCluster(ctx context.Context, leaderIP string, params AttachParams, s := map[string]string{} s[*input.VariableName] = connectionString - _, err = client.SetSecrets(ctx, input.AppID, s) + err = appsecrets.Update(ctx, flapsClient, appName, s, nil) if err != nil { return err } diff --git a/internal/command/postgres/backup.go b/internal/command/postgres/backup.go index 8c6b2fe487..a3f406d06f 100644 --- a/internal/command/postgres/backup.go +++ b/internal/command/postgres/backup.go @@ -10,6 +10,7 @@ import ( "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/flypg" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/iostreams" @@ -112,7 +113,7 @@ func runBackupRestore(ctx context.Context) error { return fmt.Errorf("backups are only supported on Flexclusters") } - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } @@ -242,7 +243,7 @@ func runBackupCreate(ctx context.Context) error { return fmt.Errorf("backups are only supported on Flexclusters") } - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } @@ -302,7 +303,7 @@ func runBackupEnable(ctx context.Context) error { client = flyutil.ClientFromContext(ctx) ) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } @@ -312,7 +313,7 @@ func runBackupEnable(ctx context.Context) error { } // Check to see if backups are already enabled - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } @@ -322,13 +323,6 @@ func runBackupEnable(ctx context.Context) error { return fmt.Errorf("backups are already enabled") } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppName: appName, - }) - if err != nil { - return fmt.Errorf("failed to initialize flaps client: %w", err) - } - machines, err := flapsClient.ListActive(ctx) if err != nil { return err @@ -374,7 +368,7 @@ func runBackupEnable(ctx context.Context) error { flypg.BarmanSecretName: pgInput.BarmanSecret, } - if _, err := client.SetSecrets(ctx, appName, secrets); err != nil { + if err := appsecrets.Update(ctx, flapsClient, appName, 
secrets, nil); err != nil { return fmt.Errorf("failed to set secrets: %w", err) } @@ -431,7 +425,7 @@ func runBackupList(ctx context.Context) error { return fmt.Errorf("backups are only supported on Flexclusters") } - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } @@ -465,12 +459,8 @@ func resolveRestoreTarget(ctx context.Context) string { return target } -func isBackupEnabled(ctx context.Context, appName string) (bool, error) { - var ( - client = flyutil.ClientFromContext(ctx) - ) - - secrets, err := client.GetAppSecrets(ctx, appName) +func isBackupEnabled(ctx context.Context, flapsClient flapsutil.FlapsClient, appName string) (bool, error) { + secrets, err := appsecrets.List(ctx, flapsClient, appName) if err != nil { return false, err } @@ -576,7 +566,7 @@ func runBackupConfigShow(ctx context.Context) error { return fmt.Errorf("backups are only supported on Flexclusters") } - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } @@ -623,7 +613,7 @@ func runBackupConfigUpdate(ctx context.Context) error { return err } - enabled, err := isBackupEnabled(ctx, appName) + enabled, err := isBackupEnabled(ctx, flapsClient, appName) if err != nil { return err } diff --git a/internal/command/postgres/barman.go b/internal/command/postgres/barman.go index 7fb65b7f92..bfcfd55fe3 100644 --- a/internal/command/postgres/barman.go +++ b/internal/command/postgres/barman.go @@ -13,6 +13,7 @@ import ( "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/buildinfo" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" @@ -245,10 +246,16 @@ func runBarmanCreate(ctx context.Context) error { Path: volumePath, }) + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return err + } + launchInput := fly.LaunchMachineInput{ - Name: "barman", - Region: volInput.Region, - Config: &machineConfig, + Name: "barman", + Region: volInput.Region, + Config: &machineConfig, + MinSecretsVersion: minvers, } fmt.Fprintf(io.Out, "Provisioning barman machine with image %s\n", machineConfig.Image) @@ -468,7 +475,7 @@ func runConsole(ctx context.Context, cmd string) error { return fmt.Errorf("get app: %w", err) } - agentclient, dialer, err := ssh.BringUpAgent(ctx, client, app, "", false) + agentclient, dialer, err := agent.BringUpAgent(ctx, client, app, "", false) if err != nil { return err } @@ -492,7 +499,7 @@ func runConsole(ctx context.Context, cmd string) error { return err } - if err := ssh.Console(ctx, sshc, cmd, false); err != nil { + if err := ssh.Console(ctx, sshc, cmd, false, ""); err != nil { captureError(ctx, err, app) return err } diff --git a/internal/command/postgres/config_update.go b/internal/command/postgres/config_update.go index e56fb7286a..ade267d41c 100644 --- a/internal/command/postgres/config_update.go +++ b/internal/command/postgres/config_update.go @@ -328,7 +328,7 @@ func resolveConfigChanges(ctx context.Context, app *fly.AppCompact, manager stri } func resolveChangeLog(ctx context.Context, changes map[string]string, settings *flypg.PGSettings) (diff.Changelog, error) { - // Verify that input values are within acceptible ranges. + // Verify that input values are within acceptable ranges. 
// Stolon does not verify this, so we need to do it here. for k, v := range changes { for _, setting := range settings.Settings { diff --git a/internal/command/postgres/create.go b/internal/command/postgres/create.go index 9b701b3c65..f22a68bc3a 100644 --- a/internal/command/postgres/create.go +++ b/internal/command/postgres/create.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "reflect" "strings" "github.com/spf13/cobra" @@ -34,6 +35,7 @@ func newCreate() *cobra.Command { flag.Region(), flag.Org(), flag.Detach(), + flag.VMSizeFlags, flag.Bool{ Name: "enable-backups", Description: "Create a new tigris bucket and enable WAL-based backups", @@ -48,14 +50,6 @@ func newCreate() *cobra.Command { Shorthand: "p", Description: "The superuser password. The password will be generated for you if you leave this blank", }, - flag.String{ - Name: "vm-size", - Description: "the size of the VM", - }, - flag.Int{ - Name: "vm-memory", - Description: "the memory of the VM in MB", - }, flag.Int{ Name: "initial-cluster-size", Description: "Initial cluster size", @@ -297,6 +291,23 @@ func CreateCluster(ctx context.Context, org *fly.Organization, region *fly.Regio BarmanRemoteRestoreConfig: flag.GetString(ctx, "restore-target-app"), } + isCustomMachine := false + for _, sizeFlag := range flag.VMSizeFlags { + nameField := reflect.ValueOf(sizeFlag).FieldByName("Name") + + if nameField.IsValid() { + name := nameField.String() + if name == "vm-size" { + continue + } + + if flag.IsSpecified(ctx, name) { + isCustomMachine = true + break + } + } + } + var config *PostgresConfiguration customConfig := false @@ -305,8 +316,7 @@ func CreateCluster(ctx context.Context, org *fly.Organization, region *fly.Regio config = &conf customConfig = false } else { - - customConfig = params.DiskGb != 0 || params.VMSize != "" || params.InitialClusterSize != 0 || params.ScaleToZero != nil + customConfig = isCustomMachine || params.DiskGb != 0 || params.VMSize != "" || params.InitialClusterSize != 0 || params.ScaleToZero != nil if !customConfig { fmt.Fprintf(io.Out, "For pricing information visit: https://fly.io/docs/about/pricing/#postgresql-clusters") @@ -361,13 +371,22 @@ func CreateCluster(ctx context.Context, org *fly.Organization, region *fly.Regio } input.InitialClusterSize = params.PostgresConfiguration.InitialClusterSize - // Resolve VM size - vmSize, err := resolveVMSize(ctx, params.VMSize) - if err != nil { - return err - } + if isCustomMachine { + guest, err := flag.GetMachineGuest(ctx, nil) + if err != nil { + return err + } - input.VMSize = vmSize + input.Guest = guest + } else { + // Resolve VM size + vmSize, err := resolveVMSize(ctx, params.VMSize) + if err != nil { + return err + } + + input.VMSize = vmSize + } if params.ScaleToZero != nil { input.ScaleToZero = *params.ScaleToZero diff --git a/internal/command/postgres/detach.go b/internal/command/postgres/detach.go index 3f0ca0a1b8..0e6c836128 100644 --- a/internal/command/postgres/detach.go +++ b/internal/command/postgres/detach.go @@ -9,9 +9,11 @@ import ( "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/flypg" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" mach "github.com/superfly/flyctl/internal/machine" "github.com/superfly/flyctl/internal/prompt" @@ -48,24 
+50,24 @@ func runDetach(ctx context.Context) error { appName = appconfig.NameFromContext(ctx) ) - pgApp, err := client.GetAppCompact(ctx, pgAppName) + ctx, appFlapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { - return fmt.Errorf("get postgres app: %w", err) + return err } - app, err := client.GetAppCompact(ctx, appName) + pgApp, err := client.GetAppCompact(ctx, pgAppName) if err != nil { - return fmt.Errorf("get app: %w", err) + return fmt.Errorf("get postgres app: %w", err) } ctx, err = apps.BuildContext(ctx, pgApp) if err != nil { return err } - return runMachineDetach(ctx, app, pgApp) + return runMachineDetach(ctx, appFlapsClient, app, pgApp) } -func runMachineDetach(ctx context.Context, app *fly.AppCompact, pgApp *fly.AppCompact) error { +func runMachineDetach(ctx context.Context, appFlapsClient flapsutil.FlapsClient, app *fly.AppCompact, pgApp *fly.AppCompact) error { var ( MinPostgresHaVersion = "0.0.19" MinPostgresFlexVersion = "0.0.3" @@ -90,11 +92,11 @@ func runMachineDetach(ctx context.Context, app *fly.AppCompact, pgApp *fly.AppCo return err } - return detachAppFromPostgres(ctx, leader.PrivateIP, app, pgApp) + return detachAppFromPostgres(ctx, leader.PrivateIP, appFlapsClient, app, pgApp) } // TODO - This process needs to be re-written to suppport non-interactive terminals. -func detachAppFromPostgres(ctx context.Context, leaderIP string, app *fly.AppCompact, pgApp *fly.AppCompact) error { +func detachAppFromPostgres(ctx context.Context, leaderIP string, appFlapsClient flapsutil.FlapsClient, app *fly.AppCompact, pgApp *fly.AppCompact) error { var ( client = flyutil.ClientFromContext(ctx) dialer = agent.DialerFromContext(ctx) @@ -142,16 +144,14 @@ func detachAppFromPostgres(ctx context.Context, leaderIP string, app *fly.AppCom } // Remove secret from consumer app. - _, err = client.UnsetSecrets(ctx, app.Name, []string{targetAttachment.EnvironmentVariableName}) + err = appsecrets.Update(ctx, appFlapsClient, app.Name, nil, []string{targetAttachment.EnvironmentVariableName}) if err != nil { - // This will error if secret doesn't exist, so just send to stdout. 
- fmt.Fprintln(io.Out, err.Error()) - } else { - fmt.Fprintf(io.Out, "Secret %q was scheduled to be removed from app %s\n", - targetAttachment.EnvironmentVariableName, - app.Name, - ) + return err } + fmt.Fprintf(io.Out, "Secret %q was scheduled to be removed from app %s\n", + targetAttachment.EnvironmentVariableName, + app.Name, + ) input := fly.DetachPostgresClusterInput{ AppID: app.Name, @@ -159,7 +159,7 @@ func detachAppFromPostgres(ctx context.Context, leaderIP string, app *fly.AppCom PostgresClusterAttachmentId: targetAttachment.ID, } - if err = client.DetachPostgresCluster(ctx, input); err != nil { + if err := client.DetachPostgresCluster(ctx, input); err != nil { return err } fmt.Fprintln(io.Out, "Detach completed successfully!") diff --git a/internal/command/postgres/failover.go b/internal/command/postgres/failover.go index 6555372d53..74d3921752 100644 --- a/internal/command/postgres/failover.go +++ b/internal/command/postgres/failover.go @@ -335,9 +335,10 @@ func handleFlexFailoverFail(ctx context.Context, machines []*fly.Machine) (err e if !strings.Contains(err.Error(), " lease not found") { return err } - } - if err := flapsClient.ReleaseLease(ctx, leader.ID, lease.Data.Nonce); err != nil { - return err + } else if lease.Data != nil { + if err := flapsClient.ReleaseLease(ctx, leader.ID, lease.Data.Nonce); err != nil { + return err + } } fmt.Println("Trying to start old leader") @@ -355,7 +356,7 @@ func handleFlexFailoverFail(ctx context.Context, machines []*fly.Machine) (err e return fmt.Errorf("old leader %s could not be started: %s", leader.ID, mach.Message) } - fmt.Println("Old leader started succesfully") + fmt.Println("Old leader started successfully") return nil } diff --git a/internal/command/postgres/import.go b/internal/command/postgres/import.go index f083f30528..cc1d5b32f1 100644 --- a/internal/command/postgres/import.go +++ b/internal/command/postgres/import.go @@ -9,9 +9,9 @@ import ( "github.com/mattn/go-colorable" "github.com/spf13/cobra" fly "github.com/superfly/fly-go" - "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" "github.com/superfly/flyctl/internal/command/ssh" @@ -88,8 +88,7 @@ func runImport(ctx context.Context) error { // pre-fetch platform regions for later use prompt.PlatformRegions(ctx) - // Resolve target app - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return fmt.Errorf("failed to resolve app: %w", err) } @@ -98,14 +97,6 @@ func runImport(ctx context.Context) error { return fmt.Errorf("The target app must be a Postgres app") } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppCompact: app, - AppName: appName, - }) - if err != nil { - return fmt.Errorf("list of machines could not be retrieved: %w", err) - } - machines, err := flapsClient.ListActive(ctx) if err != nil { return fmt.Errorf("could not retrieve machines: %w", err) @@ -135,9 +126,8 @@ func runImport(ctx context.Context) error { } // Set sourceURI as a secret - _, err = client.SetSecrets(ctx, app.Name, map[string]string{ - "SOURCE_DATABASE_URI": sourceURI, - }) + upd := map[string]string{"SOURCE_DATABASE_URI": sourceURI} + err = appsecrets.Update(ctx, flapsClient, app.Name, upd, nil) if err != nil { return fmt.Errorf("failed to set secrets: %s", err) 
} @@ -175,10 +165,16 @@ func runImport(ctx context.Context) error { } machineConfig.Image = imageRef + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return err + } + ephemeralInput := &mach.EphemeralInput{ LaunchInput: fly.LaunchMachineInput{ - Region: region.Code, - Config: machineConfig, + Region: region.Code, + Config: machineConfig, + MinSecretsVersion: minvers, }, What: "to run the import process", } @@ -207,9 +203,9 @@ func runImport(ctx context.Context) error { } // Unset secret - _, err = client.UnsetSecrets(ctx, app.Name, []string{"SOURCE_DATABASE_URI"}) + err = appsecrets.Update(ctx, flapsClient, app.Name, nil, []string{"SOURCE_DATABASE_URI"}) if err != nil { - return fmt.Errorf("failed to set secrets: %s", err) + return fmt.Errorf("failed to unset secrets: %s", err) } return nil diff --git a/internal/command/postgres/postgres.go b/internal/command/postgres/postgres.go index 0908e742fd..8a86e1fdc7 100644 --- a/internal/command/postgres/postgres.go +++ b/internal/command/postgres/postgres.go @@ -18,33 +18,46 @@ import ( func New() *cobra.Command { const ( - short = `Manage Postgres clusters.` - - long = short + "\n" + short = `Unmanaged Postgres cluster commands` + notice = "Unmanaged Fly Postgres is not supported by Fly.io Support and users are responsible for operations, management, and disaster recovery. If you'd like a managed, supported solution, try 'fly mpg' (Managed Postgres).\n" + + "Please visit https://fly.io/docs/mpg/overview/ for more information about Managed Postgres.\n" + long = notice ) cmd := command.New("postgres", short, long, nil) - cmd.Aliases = []string{"pg"} - cmd.AddCommand( - newAttach(), - newBackup(), - newConfig(), - newConnect(), - newCreate(), - newDb(), - newDetach(), - newList(), - newRenewSSHCerts(), - newRestart(), - newUsers(), - newFailover(), - newAddFlycast(), - newImport(), - newEvents(), - newBarman(), - ) + // Add PreRun to show deprecation notice + cmd.PreRun = func(cmd *cobra.Command, args []string) { + io := iostreams.FromContext(cmd.Context()) + fmt.Fprintf(io.ErrOut, "\n%s\n", notice) + } + + // Add the same PreRun to all subcommands + subcommands := []func() *cobra.Command{ + newAttach, + newBackup, + newConfig, + newConnect, + newCreate, + newDb, + newDetach, + newList, + newRenewSSHCerts, + newRestart, + newUsers, + newFailover, + newAddFlycast, + newImport, + newEvents, + newBarman, + } + + for _, newCmd := range subcommands { + subcmd := newCmd() + subcmd.PreRun = cmd.PreRun + cmd.AddCommand(subcmd) + } return cmd } @@ -208,10 +221,31 @@ func UnregisterMember(ctx context.Context, app *fly.AppCompact, machine *fly.Mac return err } - hostname := fmt.Sprintf("%s.vm.%s.internal", machine.ID, app.Name) + machineVersionStr := strings.TrimPrefix(machine.ImageVersion(), "v") + + flyVersion, err := version.NewVersion(machineVersionStr) + if err != nil { + return fmt.Errorf("failed to parse machine version: %w", err) + } + + // This is the version where we begin using Machine IDs instead of hostnames + versionGate, err := version.NewVersion("0.0.63") + if err != nil { + return fmt.Errorf("failed to parse logic gate version: %w", err) + } + + if flyVersion.LessThan(versionGate) { + // Old logic + hostname := fmt.Sprintf("%s.vm.%s.internal", machine.ID, app.Name) + + if err := cmd.UnregisterMember(ctx, leader.PrivateIP, hostname); err != nil { + if err2 := cmd.UnregisterMember(ctx, leader.PrivateIP, machine.PrivateIP); err2 != nil { + return err + } + } - if err := cmd.UnregisterMember(ctx, leader.PrivateIP, hostname); 
err != nil { - if err2 := cmd.UnregisterMember(ctx, leader.PrivateIP, machine.PrivateIP); err2 != nil { + } else { + if err := cmd.UnregisterMember(ctx, leader.PrivateIP, machine.ID); err != nil { return err } } diff --git a/internal/command/postgres/renew_certs.go b/internal/command/postgres/renew_certs.go index cfad231bc0..72be607c7b 100644 --- a/internal/command/postgres/renew_certs.go +++ b/internal/command/postgres/renew_certs.go @@ -8,10 +8,12 @@ import ( "github.com/spf13/cobra" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/apps" "github.com/superfly/flyctl/internal/command/ssh" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyutil" mach "github.com/superfly/flyctl/internal/machine" "github.com/superfly/flyctl/iostreams" @@ -45,10 +47,9 @@ func newRenewSSHCerts() *cobra.Command { func runRefreshSSHCerts(ctx context.Context) error { var ( appName = appconfig.NameFromContext(ctx) - client = flyutil.ClientFromContext(ctx) ) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } @@ -62,10 +63,10 @@ func runRefreshSSHCerts(ctx context.Context) error { return err } - return refreshSSHCerts(ctx, app) + return refreshSSHCerts(ctx, flapsClient, app) } -func refreshSSHCerts(ctx context.Context, app *fly.AppCompact) error { +func refreshSSHCerts(ctx context.Context, flapsClient flapsutil.FlapsClient, app *fly.AppCompact) error { var ( io = iostreams.FromContext(ctx) client = flyutil.ClientFromContext(ctx) @@ -106,7 +107,7 @@ func refreshSSHCerts(ctx context.Context, app *fly.AppCompact) error { "SSH_CERT": cert.Certificate, } - _, err = client.SetSecrets(ctx, app.Name, secrets) + err = appsecrets.Update(ctx, flapsClient, app.Name, secrets, nil) if err != nil { return fmt.Errorf("failed to set ssh secrets: %w", err) } diff --git a/internal/command/redis/attach.go b/internal/command/redis/attach.go index b135776ca9..d7475312d8 100644 --- a/internal/command/redis/attach.go +++ b/internal/command/redis/attach.go @@ -5,18 +5,18 @@ import ( "fmt" "github.com/superfly/flyctl/gql" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/appsecrets" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/iostreams" ) func AttachDatabase(ctx context.Context, db *gql.AddOn, appName string) (err error) { - client := flyutil.ClientFromContext(ctx) io := iostreams.FromContext(ctx) s := map[string]string{} s["REDIS_URL"] = db.PublicUrl - _, err = client.SetSecrets(ctx, appName, s) - + flapsClient := flapsutil.ClientFromContext(ctx) + err = appsecrets.Update(ctx, flapsClient, appName, s, nil) if err != nil { fmt.Fprintf(io.Out, "\nCould not attach Redis database %s to app %s\n", db.Name, appName) } else { diff --git a/internal/command/redis/create.go b/internal/command/redis/create.go index c0992af6e9..f3e7f554f5 100644 --- a/internal/command/redis/create.go +++ b/internal/command/redis/create.go @@ -4,9 +4,9 @@ import ( "context" "errors" "fmt" + "slices" "github.com/spf13/cobra" - "golang.org/x/exp/slices" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/gql" @@ -94,7 +94,7 @@ func runCreate(ctx context.Context) (err error) { return err } - var enableEviction bool = false + var enableEviction = 
false if flag.GetBool(ctx, "enable-eviction") { enableEviction = true @@ -146,7 +146,7 @@ func Create(ctx context.Context, org *fly.Organization, name string, region *fly } } - plan, err := DeterminePlan(ctx, org) + plan, err := DeterminePlan(ctx) if err != nil { return nil, err } @@ -216,7 +216,7 @@ func ProvisionDatabase(ctx context.Context, org *fly.Organization, config RedisC return &response.CreateAddOn.AddOn, nil } -func DeterminePlan(ctx context.Context, org *fly.Organization) (*gql.ListAddOnPlansAddOnPlansAddOnPlanConnectionNodesAddOnPlan, error) { +func DeterminePlan(ctx context.Context) (*gql.ListAddOnPlansAddOnPlansAddOnPlanConnectionNodesAddOnPlan, error) { client := flyutil.ClientFromContext(ctx) planId := redisPlanPayAsYouGo diff --git a/internal/command/redis/destroy.go b/internal/command/redis/destroy.go index aef7a4881a..7ea37db30d 100644 --- a/internal/command/redis/destroy.go +++ b/internal/command/redis/destroy.go @@ -63,7 +63,7 @@ func runDestroy(ctx context.Context) (err error) { name := flag.FirstArg(ctx) - _, err = gql.DeleteAddOn(ctx, client, name) + _, err = gql.DeleteAddOn(ctx, client, name, string(gql.AddOnTypeUpstashRedis)) if err != nil { return diff --git a/internal/command/redis/status.go b/internal/command/redis/status.go index 6c704f46c0..5d12eeacde 100644 --- a/internal/command/redis/status.go +++ b/internal/command/redis/status.go @@ -46,7 +46,7 @@ func runStatus(ctx context.Context) (err error) { addOn := response.AddOn - var readRegions string = "None" + var readRegions = "None" if len(addOn.ReadRegions) > 0 { readRegions = strings.Join(addOn.ReadRegions, ",") @@ -76,7 +76,7 @@ func runStatus(ctx context.Context) (err error) { }, } - var cols []string = []string{"ID", "Name", "Plan", "Primary Region", "Read Regions", "Eviction", "Private URL"} + var cols = []string{"ID", "Name", "Plan", "Primary Region", "Read Regions", "Eviction", "Private URL"} if err = render.VerticalTable(io.Out, "Redis", obj, cols...); err != nil { return diff --git a/internal/command/root/root.go b/internal/command/root/root.go index dd0880c128..2297677021 100644 --- a/internal/command/root/root.go +++ b/internal/command/root/root.go @@ -42,8 +42,10 @@ import ( "github.com/superfly/flyctl/internal/command/lfsc" "github.com/superfly/flyctl/internal/command/logs" "github.com/superfly/flyctl/internal/command/machine" + "github.com/superfly/flyctl/internal/command/mcp" "github.com/superfly/flyctl/internal/command/metrics" "github.com/superfly/flyctl/internal/command/move" + "github.com/superfly/flyctl/internal/command/mpg" "github.com/superfly/flyctl/internal/command/mysql" "github.com/superfly/flyctl/internal/command/open" "github.com/superfly/flyctl/internal/command/orgs" @@ -126,6 +128,8 @@ func New() *cobra.Command { group(ping.New(), "upkeep"), group(proxy.New(), "upkeep"), group(postgres.New(), "dbs_and_extensions"), + group(mcp.New(), "upkeep"), + group(mpg.New(), "dbs_and_extensions"), group(ips.New(), "configuring"), group(secrets.New(), "configuring"), group(ssh.New(), "upkeep"), diff --git a/internal/command/scale/count.go b/internal/command/scale/count.go index c2edb39d7e..0be44fb628 100644 --- a/internal/command/scale/count.go +++ b/internal/command/scale/count.go @@ -3,6 +3,7 @@ package scale import ( "context" "fmt" + "maps" "slices" "strconv" "strings" @@ -16,7 +17,6 @@ import ( "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flag/completion" "github.com/superfly/flyctl/internal/flapsutil" - "golang.org/x/exp/maps" ) func 
newScaleCount() *cobra.Command { @@ -75,7 +75,7 @@ func runScaleCount(ctx context.Context) error { return err } - unknownNames := lo.Filter(maps.Keys(groups), func(x string, _ int) bool { + unknownNames := lo.Filter(slices.Collect(maps.Keys(groups)), func(x string, _ int) bool { return !slices.Contains(processNames, x) }) if len(unknownNames) > 0 { diff --git a/internal/command/scale/count_machines.go b/internal/command/scale/count_machines.go index 51aa6d1e6b..f5ae2fa92c 100644 --- a/internal/command/scale/count_machines.go +++ b/internal/command/scale/count_machines.go @@ -11,6 +11,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/cmdutil" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" @@ -71,7 +72,7 @@ func runMachinesScaleCount(ctx context.Context, appName string, appConfig *appco defaults := newDefaults(appConfig, latestCompleteRelease, machines, volumes, flag.GetString(ctx, "from-snapshot"), flag.GetBool(ctx, "with-new-volumes"), defaultGuest) - actions, err := computeActions(machines, expectedGroupCounts, regions, maxPerRegion, defaults) + actions, err := computeActions(appName, machines, expectedGroupCounts, regions, maxPerRegion, defaults) if err != nil { return err } @@ -258,7 +259,7 @@ func (pi *planItem) MachineSize() string { return "" } -func computeActions(machines []*fly.Machine, expectedGroupCounts groupCounts, regions []string, maxPerRegion int, defaults *defaultValues) ([]*planItem, error) { +func computeActions(appName string, machines []*fly.Machine, expectedGroupCounts groupCounts, regions []string, maxPerRegion int, defaults *defaultValues) ([]*planItem, error) { actions := make([]*planItem, 0) seenGroups := make(map[string]bool) machineGroups := lo.GroupBy(machines, func(m *fly.Machine) string { @@ -272,6 +273,11 @@ func computeActions(machines []*fly.Machine, expectedGroupCounts groupCounts, re return max(count, 0) }) + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return nil, err + } + for groupName, groupMachines := range machineGroups { expected, ok := expectedCounts[groupName] // Ignore the group if it is not expected to change @@ -304,7 +310,7 @@ func computeActions(machines []*fly.Machine, expectedGroupCounts groupCounts, re Region: region, Delta: delta, Machines: perRegionMachines[region], - LaunchMachineInput: &fly.LaunchMachineInput{Region: region, Config: mConfig}, + LaunchMachineInput: &fly.LaunchMachineInput{Region: region, Config: mConfig, MinSecretsVersion: minvers}, Volumes: defaults.PopAvailableVolumes(mConfig, region, delta), CreateVolumeRequest: defaults.CreateVolumeRequest(mConfig, region, delta), }) @@ -332,7 +338,7 @@ func computeActions(machines []*fly.Machine, expectedGroupCounts groupCounts, re GroupName: groupName, Region: region, Delta: delta, - LaunchMachineInput: &fly.LaunchMachineInput{Region: region, Config: mConfig}, + LaunchMachineInput: &fly.LaunchMachineInput{Region: region, Config: mConfig, MinSecretsVersion: minvers}, Volumes: defaults.PopAvailableVolumes(mConfig, region, delta), CreateVolumeRequest: defaults.CreateVolumeRequest(mConfig, region, delta), }) diff --git a/internal/command/scale/machine_defaults.go b/internal/command/scale/machine_defaults.go index f540998b43..cc938cd449 100644 --- a/internal/command/scale/machine_defaults.go +++ b/internal/command/scale/machine_defaults.go @@ -68,7 
+68,7 @@ func newDefaults(appConfig *appconfig.Config, latest fly.Release, machines []*fl defaults.existingVolumes = lo.MapValues( lo.GroupBy( lo.FilterMap(volumes, func(v fly.Volume, _ int) (*fly.Volume, bool) { - return &v, !v.IsAttached() + return &v, !v.IsAttached() && v.HostStatus == "ok" }), func(v *fly.Volume) string { return v.Name }, ), diff --git a/internal/command/scale/machines.go b/internal/command/scale/machines.go index 18a334f19c..cf3c818138 100644 --- a/internal/command/scale/machines.go +++ b/internal/command/scale/machines.go @@ -8,6 +8,7 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/flapsutil" mach "github.com/superfly/flyctl/internal/machine" ) @@ -51,6 +52,11 @@ func v2ScaleVM(ctx context.Context, appName, group, sizeName string, memoryMB in return nil, err } + minvers, err := appsecrets.GetMinvers(appName) + if err != nil { + return nil, err + } + for _, machine := range machines { if sizeName != "" { machine.Config.Guest.SetSize(sizeName) @@ -60,9 +66,10 @@ func v2ScaleVM(ctx context.Context, appName, group, sizeName string, memoryMB in } input := &fly.LaunchMachineInput{ - Name: machine.Name, - Region: machine.Region, - Config: machine.Config, + Name: machine.Name, + Region: machine.Region, + Config: machine.Config, + MinSecretsVersion: minvers, } if err := mach.Update(ctx, machine, input); err != nil { return nil, err diff --git a/internal/command/scale/vm.go b/internal/command/scale/vm.go index 20b24b58f2..6e37da5815 100644 --- a/internal/command/scale/vm.go +++ b/internal/command/scale/vm.go @@ -17,10 +17,11 @@ func newScaleVm() *cobra.Command { short = "Change an app's VM to a named size (eg. shared-cpu-1x, performance-1x, performance-2x...)" long = `Change an application's VM size to one of the named VM sizes. -For a full list of supported sizes use the command 'flyctl platform vm-sizes' +For a full list of supported sizes use the command ` + "`flyctl platform vm-sizes`" + `. -Memory size can be set with --memory=number-of-MB -e.g. flyctl scale vm shared-cpu-1x --memory=2048 +Memory size can be set with the ` + "`--vm-memory`" + ` flag followed by the number of MB. + +For example: ` + "`flyctl scale vm shared-cpu-1x --vm-memory=2048`" + `. 
For pricing, see https://fly.io/docs/about/pricing/` ) diff --git a/internal/command/secrets/deploy.go b/internal/command/secrets/deploy.go index 7fa3f4fcd4..543c152cef 100644 --- a/internal/command/secrets/deploy.go +++ b/internal/command/secrets/deploy.go @@ -2,16 +2,13 @@ package secrets import ( "context" - "fmt" "github.com/spf13/cobra" - "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyerr" - "github.com/superfly/flyctl/internal/flyutil" ) func newDeploy() (cmd *cobra.Command) { @@ -27,29 +24,23 @@ func newDeploy() (cmd *cobra.Command) { flag.App(), flag.AppConfig(), flag.Detach(), + flag.Bool{ + Name: "dns-checks", + Description: "Perform DNS checks during deployment", + Default: true, + }, ) return cmd } func runDeploy(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppCompact: app, - AppName: app.Name, - }) - if err != nil { - return flyerr.GenericErr{ - Err: fmt.Sprintf("could not create flaps client: %v", err), - } - } - machines, _, err := flapsClient.ListFlyAppsMachines(ctx) if err != nil { return err @@ -63,5 +54,9 @@ func runDeploy(ctx context.Context) (err error) { } } - return DeploySecrets(ctx, app, false, flag.GetBool(ctx, "detach")) + return DeploySecrets(ctx, app, DeploymentArgs{ + Stage: false, + Detach: flag.GetBool(ctx, "detach"), + CheckDNS: flag.GetBool(ctx, "dns-checks"), + }) } diff --git a/internal/command/secrets/import.go b/internal/command/secrets/import.go index 3e1bf026a0..b06e3f4cf2 100644 --- a/internal/command/secrets/import.go +++ b/internal/command/secrets/import.go @@ -10,7 +10,7 @@ import ( "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/flapsutil" ) func newImport() (cmd *cobra.Command) { @@ -30,11 +30,10 @@ func newImport() (cmd *cobra.Command) { } func runImport(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { - return + return err } secrets, err := parseSecrets(os.Stdin) @@ -45,5 +44,9 @@ func runImport(ctx context.Context) (err error) { return errors.New("requires at least one SECRET=VALUE pair") } - return SetSecretsAndDeploy(ctx, app, secrets, flag.GetBool(ctx, "stage"), flag.GetBool(ctx, "detach")) + return SetSecretsAndDeploy(ctx, flapsClient, app, secrets, DeploymentArgs{ + Stage: flag.GetBool(ctx, "stage"), + Detach: flag.GetBool(ctx, "detach"), + CheckDNS: flag.GetBool(ctx, "dns-checks"), + }) } diff --git a/internal/command/secrets/key_delete.go b/internal/command/secrets/key_delete.go index 0d37323fb3..682950d734 100644 --- a/internal/command/secrets/key_delete.go +++ b/internal/command/secrets/key_delete.go @@ -7,8 +7,10 @@ import ( "github.com/spf13/cobra" "github.com/superfly/fly-go/flaps" + "github.com/superfly/flyctl/internal/appconfig" 
"github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/iostreams" ) @@ -52,12 +54,13 @@ func runKeyDelete(ctx context.Context) (err error) { return err } - flapsClient, err := getFlapsClient(ctx) + appName := appconfig.NameFromContext(ctx) + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - secrets, err := flapsClient.ListSecrets(ctx) + secrets, err := flapsClient.ListSecretKeys(ctx, nil) if err != nil { return err } @@ -66,7 +69,7 @@ func runKeyDelete(ctx context.Context) (err error) { var rerr error out := iostreams.FromContext(ctx).Out for _, secret := range secrets { - ver2, prefix2, err := SplitLabelKeyver(secret.Label) + ver2, prefix2, err := SplitLabelKeyver(secret.Name) if err != nil { continue } @@ -87,7 +90,7 @@ func runKeyDelete(ctx context.Context) (err error) { } if !flag.GetBool(ctx, "force") { - confirm, err := prompt.Confirm(ctx, fmt.Sprintf("delete secrets key %s?", secret.Label)) + confirm, err := prompt.Confirm(ctx, fmt.Sprintf("delete secrets key %s?", secret.Name)) if err != nil { rerr = errors.Join(rerr, err) continue @@ -97,15 +100,15 @@ func runKeyDelete(ctx context.Context) (err error) { } } - err = flapsClient.DeleteSecret(ctx, secret.Label) + err = flapsClient.DeleteSecretKey(ctx, secret.Name) if err != nil { var ferr *flaps.FlapsError if errors.As(err, &ferr) && ferr.ResponseStatusCode == 404 { err = fmt.Errorf("not found") } - rerr = errors.Join(rerr, fmt.Errorf("deleting %v: %w", secret.Label, err)) + rerr = errors.Join(rerr, fmt.Errorf("deleting %v: %w", secret.Name, err)) } else { - fmt.Fprintf(out, "Deleted %v\n", secret.Label) + fmt.Fprintf(out, "Deleted %v\n", secret.Name) } } return rerr diff --git a/internal/command/secrets/key_set.go b/internal/command/secrets/key_set.go index 0f5aaf0fe9..d1a62dae8e 100644 --- a/internal/command/secrets/key_set.go +++ b/internal/command/secrets/key_set.go @@ -6,9 +6,10 @@ import ( "fmt" "github.com/spf13/cobra" - fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/iostreams" ) @@ -79,12 +80,13 @@ func runKeySetOrGenerate(ctx context.Context) (err error) { } } - flapsClient, err := getFlapsClient(ctx) + appName := appconfig.NameFromContext(ctx) + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - secrets, err := flapsClient.ListSecrets(ctx) + secrets, err := flapsClient.ListSecretKeys(ctx, nil) if err != nil { return err } @@ -93,13 +95,13 @@ func runKeySetOrGenerate(ctx context.Context) (err error) { // while finding the highest version with the same prefix. 
bestVer := KeyverUnspec for _, secret := range secrets { - if label == secret.Label { + if label == secret.Name { if !flag.GetBool(ctx, "force") { return fmt.Errorf("refusing to overwrite existing key") } } - ver2, prefix2, err := SplitLabelKeyver(secret.Label) + ver2, prefix2, err := SplitLabelKeyver(secret.Name) if err != nil { continue } @@ -111,7 +113,7 @@ func runKeySetOrGenerate(ctx context.Context) (err error) { semType2, _ := SecretTypeToSemanticType(secret.Type) if semType2 != semType { typs := secretTypeToString(secret.Type) - return fmt.Errorf("key %v (%v) has conflicting type %v (%v)", prefix, secret.Label, semType2, typs) + return fmt.Errorf("key %v (%v) has conflicting type %v (%v)", prefix, secret.Name, semType2, typs) } if CompareKeyver(ver2, bestVer) > 0 { @@ -136,9 +138,9 @@ func runKeySetOrGenerate(ctx context.Context) (err error) { } if gen { - err = flapsClient.GenerateSecret(ctx, label, typ) + _, err = flapsClient.GenerateSecretKey(ctx, label, typ) } else { - err = flapsClient.CreateSecret(ctx, label, typ, fly.CreateSecretRequest{Value: val}) + _, err = flapsClient.SetSecretKey(ctx, label, typ, val) } if err != nil { return err diff --git a/internal/command/secrets/keys.go b/internal/command/secrets/keys.go index af254987e1..cc1fafd608 100644 --- a/internal/command/secrets/keys.go +++ b/internal/command/secrets/keys.go @@ -1,30 +1,24 @@ package secrets import ( - "context" - "fmt" "strings" "github.com/spf13/cobra" fly "github.com/superfly/fly-go" - "github.com/superfly/fly-go/flaps" - "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" - "github.com/superfly/flyctl/internal/flapsutil" - "github.com/superfly/flyctl/internal/flyutil" ) type SecretType = string const ( - SECRET_TYPE_KMS_HS256 = fly.SECRET_TYPE_KMS_HS256 - SECRET_TYPE_KMS_HS384 = fly.SECRET_TYPE_KMS_HS384 - SECRET_TYPE_KMS_HS512 = fly.SECRET_TYPE_KMS_HS512 - SECRET_TYPE_KMS_XAES256GCM = fly.SECRET_TYPE_KMS_XAES256GCM - SECRET_TYPE_KMS_NACL_AUTH = fly.SECRET_TYPE_KMS_NACL_AUTH - SECRET_TYPE_KMS_NACL_BOX = fly.SECRET_TYPE_KMS_NACL_BOX - SECRET_TYPE_KMS_NACL_SECRETBOX = fly.SECRET_TYPE_KMS_NACL_SECRETBOX - SECRET_TYPE_KMS_NACL_SIGN = fly.SECRET_TYPE_KMS_NACL_SIGN + SECRETKEY_TYPE_HS256 = fly.SECRETKEY_TYPE_HS256 + SECRETKEY_TYPE_HS384 = fly.SECRETKEY_TYPE_HS384 + SECRETKEY_TYPE_HS512 = fly.SECRETKEY_TYPE_HS512 + SECRETKEY_TYPE_XAES256GCM = fly.SECRETKEY_TYPE_XAES256GCM + SECRETKEY_TYPE_NACL_AUTH = fly.SECRETKEY_TYPE_NACL_AUTH + SECRETKEY_TYPE_NACL_BOX = fly.SECRETKEY_TYPE_NACL_BOX + SECRETKEY_TYPE_NACL_SECRETBOX = fly.SECRETKEY_TYPE_NACL_SECRETBOX + SECRETKEY_TYPE_NACL_SIGN = fly.SECRETKEY_TYPE_NACL_SIGN ) func newKeys() *cobra.Command { @@ -54,22 +48,3 @@ func newKeys() *cobra.Command { func secretTypeToString(sType string) string { return strings.TrimPrefix(strings.ToLower(sType), "secret_type_kms_") } - -// getFlapsClient builds and returns a flaps client for the App from the context. 
-func getFlapsClient(ctx context.Context) (*flaps.Client, error) { - client := flyutil.ClientFromContext(ctx) - appName := appconfig.NameFromContext(ctx) - app, err := client.GetAppCompact(ctx, appName) - if err != nil { - return nil, fmt.Errorf("get app: %w", err) - } - - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppCompact: app, - AppName: app.Name, - }) - if err != nil { - return nil, fmt.Errorf("could not create flaps client: %w", err) - } - return flapsClient, nil -} diff --git a/internal/command/secrets/keys_common.go b/internal/command/secrets/keys_common.go index d7c1c7baf9..f2dde4be64 100644 --- a/internal/command/secrets/keys_common.go +++ b/internal/command/secrets/keys_common.go @@ -25,18 +25,18 @@ type KeyTypeInfo struct { // In this list, the most preferred types are listed first. var supportedKeyTypes = []KeyTypeInfo{ // Preferred key types: - {SECRET_TYPE_KMS_NACL_AUTH, SemTypeSigning}, - {SECRET_TYPE_KMS_NACL_SECRETBOX, SemTypeEncrypting}, + {SECRETKEY_TYPE_NACL_AUTH, SemTypeSigning}, + {SECRETKEY_TYPE_NACL_SECRETBOX, SemTypeEncrypting}, // Also supported key types: - {SECRET_TYPE_KMS_HS256, SemTypeSigning}, - {SECRET_TYPE_KMS_HS384, SemTypeSigning}, - {SECRET_TYPE_KMS_HS512, SemTypeSigning}, - {SECRET_TYPE_KMS_XAES256GCM, SemTypeEncrypting}, + {SECRETKEY_TYPE_HS256, SemTypeSigning}, + {SECRETKEY_TYPE_HS384, SemTypeSigning}, + {SECRETKEY_TYPE_HS512, SemTypeSigning}, + {SECRETKEY_TYPE_XAES256GCM, SemTypeEncrypting}, // Unsupported: - // SECRET_TYPE_KMS_NACL_BOX, SemTypePublicEncrypting - // SECRET_TYPE_KMS_NACL_SIGN, SmeTypePublicSigning + // SECRETKEY_TYPE_NACL_BOX, SemTypePublicEncrypting + // SECRETKEY_TYPE_NACL_SIGN, SmeTypePublicSigning } // SupportedSecretTypes is a list of the SecretTypes for supported key types. 
diff --git a/internal/command/secrets/keys_list.go b/internal/command/secrets/keys_list.go index b253a68f0d..c86bd60f42 100644 --- a/internal/command/secrets/keys_list.go +++ b/internal/command/secrets/keys_list.go @@ -8,9 +8,11 @@ import ( "github.com/spf13/cobra" fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/render" "github.com/superfly/flyctl/iostreams" ) @@ -36,12 +38,12 @@ name and version.` return cmd } -func compareSecrets(a, b fly.ListSecret) int { - aver, aprefix, err1 := SplitLabelKeyver(a.Label) +func compareSecrets(a, b fly.SecretKey) int { + aver, aprefix, err1 := SplitLabelKeyver(a.Name) if err1 != nil { return -1 } - bver, bprefix, err2 := SplitLabelKeyver(b.Label) + bver, bprefix, err2 := SplitLabelKeyver(b.Name) if err2 != nil { return 1 } @@ -66,12 +68,14 @@ type jsonSecret struct { func runKeysList(ctx context.Context) (err error) { cfg := config.FromContext(ctx) out := iostreams.FromContext(ctx).Out - flapsClient, err := getFlapsClient(ctx) + + appName := appconfig.NameFromContext(ctx) + ctx, flapsClient, _, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - secrets, err := flapsClient.ListSecrets(ctx) + secrets, err := flapsClient.ListSecretKeys(ctx, nil) if err != nil { return err } @@ -85,12 +89,12 @@ func runKeysList(ctx context.Context) (err error) { continue } - ver, prefix, err := SplitLabelKeyver(secret.Label) + ver, prefix, err := SplitLabelKeyver(secret.Name) if err != nil { continue } jsecret := jsonSecret{ - Label: secret.Label, + Label: secret.Name, Name: prefix, Version: ver.String(), SemType: string(semType), diff --git a/internal/command/secrets/list.go b/internal/command/secrets/list.go index c928ab9bd5..90f9071d13 100644 --- a/internal/command/secrets/list.go +++ b/internal/command/secrets/list.go @@ -5,11 +5,11 @@ import ( "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/config" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/internal/format" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/render" "github.com/superfly/flyctl/iostreams" ) @@ -37,12 +37,16 @@ actual value of the secret is only available to the application.` } func runList(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - out := iostreams.FromContext(ctx).Out - secrets, err := client.GetAppSecrets(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) + if err != nil { + return err + } + cfg := config.FromContext(ctx) + out := iostreams.FromContext(ctx).Out + secrets, err := appsecrets.List(ctx, flapsClient, app.Name) if err != nil { return err } @@ -53,14 +57,12 @@ func runList(ctx context.Context) (err error) { rows = append(rows, []string{ secret.Name, secret.Digest, - format.RelativeTime(secret.CreatedAt), }) } headers := []string{ "Name", "Digest", - "Created At", } if cfg.JSONOutput { return render.JSON(out, secrets) diff --git a/internal/command/secrets/parser.go b/internal/command/secrets/parser.go index 704e312c67..e7195708e4 100644 
--- a/internal/command/secrets/parser.go +++ b/internal/command/secrets/parser.go @@ -28,24 +28,29 @@ func parseSecrets(reader io.Reader) (map[string]string, error) { continue } - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { + key, value, ok := strings.Cut(line, "=") + if !ok { return nil, fmt.Errorf("Secrets must be provided as NAME=VALUE pairs (%s is invalid)", line) } + key = strings.TrimSpace(key) + value = strings.TrimLeft(value, " ") + l, _, ok := strings.Cut(value, "#") + if ok && strings.Count(l, `"`)%2 == 0 { + value = strings.TrimRight(l, " ") + } - if strings.HasPrefix(parts[1], `"""`) { + if strings.HasPrefix(value, `"""`) { // Switch to multiline parserState = parserStateMultiline - parsedKey = parts[0] - parsedVal.WriteString(strings.TrimPrefix(parts[1], `"""`)) + parsedKey = key + parsedVal.WriteString(strings.TrimPrefix(value, `"""`)) parsedVal.WriteString("\n") } else { - value := parts[1] if strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`) { // Remove double quotes value = value[1 : len(value)-1] } - secrets[parts[0]] = value + secrets[key] = value } case parserStateMultiline: if strings.HasSuffix(line, `"""`) { diff --git a/internal/command/secrets/parser_test.go b/internal/command/secrets/parser_test.go index 49aa8ea5ec..2c87d2ae4e 100644 --- a/internal/command/secrets/parser_test.go +++ b/internal/command/secrets/parser_test.go @@ -90,3 +90,23 @@ func Test_parse_with_double_quotes(t *testing.T) { "FOO": "BAR BAZ", }, secrets) } + +// https://github.com/superfly/flyctl/issues/3002 +func Test_parse_with_spaces(t *testing.T) { + reader := strings.NewReader(`FOO = BAR`) + secrets, err := parseSecrets(reader) + assert.NoError(t, err) + assert.Equal(t, map[string]string{ + "FOO": "BAR", + }, secrets) +} + +// https://github.com/superfly/flyctl/issues/4291 +func Test_parse_with_comment(t *testing.T) { + reader := strings.NewReader(`FOO="BAR BAZ" # comment`) + secrets, err := parseSecrets(reader) + assert.NoError(t, err) + assert.Equal(t, map[string]string{ + "FOO": "BAR BAZ", + }, secrets) +} diff --git a/internal/command/secrets/secrets.go b/internal/command/secrets/secrets.go index 92d0d9792a..3919fd5236 100644 --- a/internal/command/secrets/secrets.go +++ b/internal/command/secrets/secrets.go @@ -6,7 +6,6 @@ import ( "github.com/spf13/cobra" fly "github.com/superfly/fly-go" - "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/command/deploy" @@ -24,6 +23,11 @@ var sharedFlags = flag.Set{ Name: "stage", Description: "Set secrets but skip deployment for machine apps", }, + flag.Bool{ + Name: "dns-checks", + Description: "Perform DNS checks during deployment", + Default: true, + }, } func New() *cobra.Command { @@ -41,6 +45,7 @@ func New() *cobra.Command { secrets.AddCommand( newList(), newSet(), + newSync(), newUnset(), newImport(), newDeploy(), @@ -50,24 +55,26 @@ func New() *cobra.Command { return secrets } -func DeploySecrets(ctx context.Context, app *fly.AppCompact, stage bool, detach bool) error { +type DeploymentArgs struct { + Stage bool + Detach bool + CheckDNS bool +} + +// DeploySecrets deploys machines with the new secret if this step is not to be skipped. +func DeploySecrets(ctx context.Context, app *fly.AppCompact, args DeploymentArgs) error { out := iostreams.FromContext(ctx).Out - if stage { + if args.Stage { fmt.Fprint(out, "Secrets have been staged, but not set on VMs. 
Deploy or update machines in this app for the secrets to take effect.\n") return nil } - flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ - AppCompact: app, - AppName: app.Name, - }) - if err != nil { - return fmt.Errorf("could not create flaps client: %w", err) - } - ctx = flapsutil.NewContextWithClient(ctx, flapsClient) - // Due to https://github.com/superfly/web/issues/1397 we have to be extra careful + flapsClient := flapsutil.ClientFromContext(ctx) + if flapsClient == nil { + return fmt.Errorf("flaps client missing from context") + } machines, _, err := flapsClient.ListFlyAppsMachines(ctx) if err != nil { return err @@ -88,7 +95,8 @@ func DeploySecrets(ctx context.Context, app *fly.AppCompact, stage bool, detach md, err := deploy.NewMachineDeployment(ctx, deploy.MachineDeploymentArgs{ AppCompact: app, RestartOnly: true, - SkipHealthChecks: detach, + SkipHealthChecks: args.Detach, + SkipDNSChecks: args.Detach || !args.CheckDNS, }) if err != nil { sentry.CaptureExceptionWithAppInfo(ctx, err, "secrets", app) diff --git a/internal/command/secrets/set.go b/internal/command/secrets/set.go index e5d205827a..0c13a55c9d 100644 --- a/internal/command/secrets/set.go +++ b/internal/command/secrets/set.go @@ -9,10 +9,11 @@ import ( fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/cmdutil" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/flapsutil" ) func newSet() (cmd *cobra.Command) { @@ -34,9 +35,8 @@ func newSet() (cmd *cobra.Command) { } func runSet(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } @@ -63,14 +63,17 @@ func runSet(ctx context.Context) (err error) { return errors.New("requires at least one SECRET=VALUE pair") } - return SetSecretsAndDeploy(ctx, app, secrets, flag.GetBool(ctx, "stage"), flag.GetBool(ctx, "detach")) + return SetSecretsAndDeploy(ctx, flapsClient, app, secrets, DeploymentArgs{ + Stage: flag.GetBool(ctx, "stage"), + Detach: flag.GetBool(ctx, "detach"), + CheckDNS: flag.GetBool(ctx, "dns-checks"), + }) } -func SetSecretsAndDeploy(ctx context.Context, app *fly.AppCompact, secrets map[string]string, stage bool, detach bool) error { - client := flyutil.ClientFromContext(ctx) - if _, err := client.SetSecrets(ctx, app.Name, secrets); err != nil { - return err +func SetSecretsAndDeploy(ctx context.Context, flapsClient flapsutil.FlapsClient, app *fly.AppCompact, secrets map[string]string, args DeploymentArgs) error { + if err := appsecrets.Update(ctx, flapsClient, app.Name, secrets, nil); err != nil { + return fmt.Errorf("update secrets: %w", err) } - return DeploySecrets(ctx, app, stage, detach) + return DeploySecrets(ctx, app, args) } diff --git a/internal/command/secrets/sync.go b/internal/command/secrets/sync.go new file mode 100644 index 0000000000..236504d88c --- /dev/null +++ b/internal/command/secrets/sync.go @@ -0,0 +1,47 @@ +package secrets + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" + 
"github.com/superfly/flyctl/internal/command" + "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" +) + +func newSync() (cmd *cobra.Command) { + const ( + long = `Sync flyctl with the latest versions of app secrets, even if they were set elsewhere` + short = long + usage = "sync [flags]" + ) + + cmd = command.New(usage, short, long, runSync, command.RequireSession, command.RequireAppName) + + flag.Add(cmd, + sharedFlags, + ) + + return cmd +} + +// runSync updates the app's minsecret version to the current point in time. +// Any secrets set previous to this point in time will be visible when flyctl +// deploys apps. This addresses an issue where flyctl maintains a local copy +// of the min secrets version for app secrets that it updates, but is not aware +// of the version set elsewhere, such as by the dashboard or another flyctl. +func runSync(ctx context.Context) (err error) { + appName := appconfig.NameFromContext(ctx) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) + if err != nil { + return err + } + + if err := appsecrets.Sync(ctx, flapsClient, app.Name); err != nil { + return fmt.Errorf("sync secrets: %w", err) + } + return nil +} diff --git a/internal/command/secrets/unset.go b/internal/command/secrets/unset.go index a51d29e9d1..63e0f4e11a 100644 --- a/internal/command/secrets/unset.go +++ b/internal/command/secrets/unset.go @@ -2,13 +2,15 @@ package secrets import ( "context" + "fmt" "github.com/spf13/cobra" fly "github.com/superfly/fly-go" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" - "github.com/superfly/flyctl/internal/flyutil" + "github.com/superfly/flyctl/internal/flapsutil" ) func newUnset() (cmd *cobra.Command) { @@ -30,21 +32,23 @@ func newUnset() (cmd *cobra.Command) { } func runUnset(ctx context.Context) (err error) { - client := flyutil.ClientFromContext(ctx) appName := appconfig.NameFromContext(ctx) - app, err := client.GetAppCompact(ctx, appName) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) if err != nil { return err } - return UnsetSecretsAndDeploy(ctx, app, flag.Args(ctx), flag.GetBool(ctx, "stage"), flag.GetBool(ctx, "detach")) + return UnsetSecretsAndDeploy(ctx, flapsClient, app, flag.Args(ctx), DeploymentArgs{ + Stage: flag.GetBool(ctx, "stage"), + Detach: flag.GetBool(ctx, "detach"), + CheckDNS: flag.GetBool(ctx, "dns-checks"), + }) } -func UnsetSecretsAndDeploy(ctx context.Context, app *fly.AppCompact, secrets []string, stage bool, detach bool) error { - client := flyutil.ClientFromContext(ctx) - if _, err := client.UnsetSecrets(ctx, app.Name, secrets); err != nil { - return err +func UnsetSecretsAndDeploy(ctx context.Context, flapsClient flapsutil.FlapsClient, app *fly.AppCompact, secrets []string, args DeploymentArgs) error { + if err := appsecrets.Update(ctx, flapsClient, app.Name, nil, secrets); err != nil { + return fmt.Errorf("update secrets: %w", err) } - return DeploySecrets(ctx, app, stage, detach) + return DeploySecrets(ctx, app, args) } diff --git a/internal/command/ssh/connect.go b/internal/command/ssh/connect.go index 0a76601872..426f6b72d2 100644 --- a/internal/command/ssh/connect.go +++ b/internal/command/ssh/connect.go @@ -14,48 +14,19 @@ import ( "github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/flyutil" - "github.com/superfly/flyctl/iostreams" 
"github.com/superfly/flyctl/ssh" "github.com/superfly/flyctl/terminal" ) const DefaultSshUsername = "root" -func BringUpAgent(ctx context.Context, client flyutil.Client, app *fly.AppCompact, network string, quiet bool) (*agent.Client, agent.Dialer, error) { - io := iostreams.FromContext(ctx) - - agentclient, err := agent.Establish(ctx, client) - if err != nil { - captureError(ctx, err, app) - return nil, nil, errors.Wrap(err, "can't establish agent") - } - - dialer, err := agentclient.Dialer(ctx, app.Organization.Slug, network) - if err != nil { - captureError(ctx, err, app) - return nil, nil, fmt.Errorf("ssh: can't build tunnel for %s: %s\n", app.Organization.Slug, err) - } - - if !quiet { - io.StartProgressIndicatorMsg("Connecting to tunnel") - } - if err := agentclient.WaitForTunnel(ctx, app.Organization.Slug, network); err != nil { - captureError(ctx, err, app) - return nil, nil, errors.Wrapf(err, "tunnel unavailable") - } - if !quiet { - io.StopProgressIndicator() - } - - return agentclient, dialer, nil -} - type ConnectParams struct { Ctx context.Context Org fly.OrganizationImpl Username string Dialer agent.Dialer DisableSpinner bool + Container string AppNames []string } diff --git a/internal/command/ssh/console.go b/internal/command/ssh/console.go index 0e8f1638c6..a27dc1ca86 100644 --- a/internal/command/ssh/console.go +++ b/internal/command/ssh/console.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "slices" "time" "github.com/docker/docker/pkg/ioutils" @@ -61,6 +62,10 @@ func stdArgsSSH(cmd *cobra.Command) { Shorthand: "A", Description: "Address of VM to connect to", }, + flag.String{ + Name: "container", + Description: "Container to connect to", + }, flag.Bool{ Name: "pty", Description: "Allocate a pseudo-terminal (default: on when no command is provided)", @@ -79,17 +84,34 @@ func quiet(ctx context.Context) bool { return flag.GetBool(ctx, "quiet") } -func lookupAddress(ctx context.Context, cli *agent.Client, dialer agent.Dialer, app *fly.AppCompact, console bool) (addr string, err error) { - addr, err = addrForMachines(ctx, app, console) +func lookupAddressAndContainer(ctx context.Context, cli *agent.Client, dialer agent.Dialer, app *fly.AppCompact, console bool) (addr string, container string, err error) { + selectedMachine, err := selectMachine(ctx, app) if err != nil { - return + return "", "", err + } + + container, err = selectContainer(ctx, selectedMachine) + if err != nil { + return "", "", err + } + + if addr = flag.GetString(ctx, "address"); addr != "" { + return addr, container, nil + } + + if addr == "" { + if console && len(flag.Args(ctx)) != 0 { + addr = flag.Args(ctx)[0] + } else { + addr = selectedMachine.PrivateIP + } } // wait for the addr to be resolved in dns unless it's an ip address if !ip.IsV6(addr) { if err := cli.WaitForDNS(ctx, dialer, app.Organization.Slug, addr, ""); err != nil { captureError(ctx, err, app) - return "", errors.Wrapf(err, "host unavailable at %s", addr) + return "", "", errors.Wrapf(err, "host unavailable at %s", addr) } } @@ -150,12 +172,12 @@ func runConsole(ctx context.Context) error { return fmt.Errorf("get app network: %w", err) } - agentclient, dialer, err := BringUpAgent(ctx, client, app, *network, quiet(ctx)) + agentclient, dialer, err := agent.BringUpAgent(ctx, client, app, *network, quiet(ctx)) if err != nil { return err } - addr, err := lookupAddress(ctx, agentclient, dialer, app, true) + addr, container, err := lookupAddressAndContainer(ctx, agentclient, dialer, app, true) if err != nil { return err } @@ -177,6 +199,7 
@@ func runConsole(ctx context.Context) error { Dialer: dialer, Username: flag.GetString(ctx, "user"), DisableSpinner: quiet(ctx), + Container: container, AppNames: []string{app.Name}, } sshc, err := Connect(params, addr) @@ -185,7 +208,7 @@ func runConsole(ctx context.Context) error { return err } - if err := Console(ctx, sshc, cmd, allocPTY); err != nil { + if err := Console(ctx, sshc, cmd, allocPTY, params.Container); err != nil { captureError(ctx, err, app) return err } @@ -193,7 +216,7 @@ func runConsole(ctx context.Context) error { return nil } -func Console(ctx context.Context, sshClient *ssh.Client, cmd string, allocPTY bool) error { +func Console(ctx context.Context, sshClient *ssh.Client, cmd string, allocPTY bool, container string) error { currentStdin, currentStdout, currentStderr, err := setupConsole() defer func() error { if err := cleanupConsole(currentStdin, currentStdout, currentStderr); err != nil { @@ -214,26 +237,26 @@ func Console(ctx context.Context, sshClient *ssh.Client, cmd string, allocPTY bo TermEnv: determineTermEnv(), } - if err := sshClient.Shell(ctx, sessIO, cmd); err != nil { + if err := sshClient.Shell(ctx, sessIO, cmd, container); err != nil { return errors.Wrap(err, "ssh shell") } return err } -func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (addr string, err error) { +func selectMachine(ctx context.Context, app *fly.AppCompact) (machine *fly.Machine, err error) { out := iostreams.FromContext(ctx).Out flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ AppCompact: app, AppName: app.Name, }) if err != nil { - return "", err + return nil, err } machines, err := flapsClient.ListActive(ctx) if err != nil { - return "", err + return nil, err } machines = lo.Filter(machines, func(m *fly.Machine, _ int) bool { @@ -241,7 +264,7 @@ func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (ad }) if len(machines) < 1 { - return "", fmt.Errorf("app %s has no started VMs.\nIt may be unhealthy or not have been deployed yet.\nTry the following command to verify:\n\nfly status", app.Name) + return nil, fmt.Errorf("app %s has no started VMs.\nIt may be unhealthy or not have been deployed yet.\nTry the following command to verify:\n\nfly status", app.Name) } if region := flag.GetRegion(ctx); region != "" { @@ -249,7 +272,7 @@ func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (ad return m.Region == region }) if len(machines) < 1 { - return "", fmt.Errorf("app %s has no VMs in region %s", app.Name, region) + return nil, fmt.Errorf("app %s has no VMs in region %s", app.Name, region) } } @@ -258,7 +281,7 @@ func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (ad return m.ProcessGroup() == group }) if len(machines) < 1 { - return "", fmt.Errorf("app %s has no VMs in process group %s", app.Name, group) + return nil, fmt.Errorf("app %s has no VMs in process group %s", app.Name, group) } } @@ -295,15 +318,23 @@ func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (ad namesWithRegion = append(namesWithRegion, nameWithRegion) } + if machineID != "" && selectedMachine == nil { + return nil, fmt.Errorf("--machine=%q not found/started", machineID) + } + if flag.GetBool(ctx, "select") { if flag.IsSpecified(ctx, "machine") { - return "", errors.New("--machine can't be used with -s/--select") + return nil, errors.New("--machine can't be used with -s/--select") } selected := 0 - if prompt.Select(ctx, &selected, "Select VM:", "", 
namesWithRegion...); err != nil { - return "", fmt.Errorf("selecting VM: %w", err) + if len(namesWithRegion) > 1 { + if err = prompt.Select(ctx, &selected, "Select VM:", "", namesWithRegion...); err != nil { + return nil, fmt.Errorf("selecting VM: %w", err) + } + } else if len(machines) == 1 { + fmt.Fprintf(out, "Only one machine available, selecting %s in region %s\n", machines[0].ID, machines[0].Region) } selectedMachine = machines[selected] @@ -314,33 +345,75 @@ func addrForMachines(ctx context.Context, app *fly.AppCompact, console bool) (ad fmt.Fprintf(out, "Starting machine %s..", selectedMachine.ID) _, err := flapsClient.Start(ctx, selectedMachine.ID, "") if err != nil { - return "", err + return nil, err } err = flapsClient.Wait(ctx, selectedMachine, "started", 60*time.Second) if err != nil { - return "", err + return nil, err } } } - if addr = flag.GetString(ctx, "address"); addr != "" { - return addr, nil - } + // No VM was selected or passed as an argument, so just pick the first one for now + // Later, we might want to use 'nearest.of' but also resolve the machine IP to be able to start it + + if selectedMachine == nil { + selectedMachine = machines[0] - if console { - if len(flag.Args(ctx)) != 0 { - return flag.Args(ctx)[0], nil + if len(machines) > 1 || flag.GetBool(ctx, "select") { + fmt.Fprintf(out, "No machine specified, using %s in region %s\n", selectedMachine.ID, selectedMachine.Region) } } - if selectedMachine == nil { - selectedMachine = machines[0] + return selectedMachine, nil +} + +// selectContainer selects a container from the machine's config. +func selectContainer(ctx context.Context, machine *fly.Machine) (container string, err error) { + containers := machine.Config.Containers + container = flag.GetString(ctx, "container") + + if len(containers) == 0 { + if container == "" { + return "", nil + } else { + return "", fmt.Errorf("no containers found for machine %s", machine.ID) + } + } else if len(containers) == 1 { + if container == "" || container == containers[0].Name { + return containers[0].Name, nil + } else { + return "", fmt.Errorf("container named %s is not present in machine %s, try running with --select to see a list", container, machine.ID) + } + } else { + var availableContainers []string + for _, c := range containers { + availableContainers = append(availableContainers, c.Name) + } + + if container == "" { + selected := 0 + if len(availableContainers) > 1 { + if flag.GetBool(ctx, "select") { + if err = prompt.Select(ctx, &selected, "Select container:", "", availableContainers...); err != nil { + return "", fmt.Errorf("selecting container: %w", err) + } + } else { + fmt.Printf("No container specified, using %s\n", availableContainers[0]) + } + } + return availableContainers[selected], nil + } else { + if slices.Contains(availableContainers, container) { + return container, nil + } else { + return "", fmt.Errorf("container named %s is not present in machine %s, try running with --select to see a list", container, machine.ID) + } + } } - // No VM was selected or passed as an argument, so just pick the first one for now - // Later, we might want to use 'nearest.of' but also resolve the machine IP to be able to start it - return selectedMachine.PrivateIP, nil + } const defaultTermEnv = "xterm" diff --git a/internal/command/ssh/sftp.go b/internal/command/ssh/sftp.go index 226e1ac5ec..ed5ca17522 100644 --- a/internal/command/ssh/sftp.go +++ b/internal/command/ssh/sftp.go @@ -15,6 +15,7 @@ import ( "github.com/pkg/sftp" "github.com/spf13/cobra" + 
"github.com/superfly/flyctl/agent" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" "github.com/superfly/flyctl/internal/flag" @@ -37,6 +38,7 @@ func NewSFTP() *cobra.Command { newFind(), newSFTPShell(), newGet(), + newPut(), ) return cmd @@ -74,13 +76,53 @@ func newGet() *cobra.Command { const ( long = `The SFTP GET retrieves a file from a remote VM.` short = long - usage = "get " + usage = "get [local-path]" ) cmd := command.New(usage, short, long, runGet, command.RequireSession, command.RequireAppName) cmd.Args = cobra.MaximumNArgs(2) + flag.Add(cmd, + flag.Bool{ + Name: "recursive", + Shorthand: "R", + Description: "Download directories recursively", + Default: false, + }, + ) + + stdArgsSSH(cmd) + + return cmd +} + +func newPut() *cobra.Command { + const ( + long = `The SFTP PUT uploads a file to a remote VM.` + short = long + usage = "put [remote-path]" + ) + + cmd := command.New(usage, short, long, runPut, command.RequireSession, command.RequireAppName) + + cmd.Args = cobra.RangeArgs(1, 2) + + flag.Add(cmd, + flag.String{ + Name: "mode", + Shorthand: "m", + Description: "File mode/permissions for the uploaded file (default: 0644)", + Default: "0644", + }, + flag.Bool{ + Name: "recursive", + Shorthand: "R", + Description: "Upload directories recursively", + Default: false, + }, + ) + stdArgsSSH(cmd) return cmd @@ -100,12 +142,12 @@ func newSFTPConnection(ctx context.Context) (*sftp.Client, error) { return nil, fmt.Errorf("get app network: %w", err) } - agentclient, dialer, err := BringUpAgent(ctx, client, app, *network, quiet(ctx)) + agentclient, dialer, err := agent.BringUpAgent(ctx, client, app, *network, quiet(ctx)) if err != nil { return nil, err } - addr, err := lookupAddress(ctx, agentclient, dialer, app, false) + addr, container, err := lookupAddressAndContainer(ctx, agentclient, dialer, app, false) if err != nil { return nil, err } @@ -116,6 +158,7 @@ func newSFTPConnection(ctx context.Context) (*sftp.Client, error) { Dialer: dialer, Username: DefaultSshUsername, DisableSpinner: true, + Container: container, AppNames: []string{app.Name}, } @@ -183,6 +226,20 @@ func runGet(ctx context.Context) error { return err } + // Check if remote is a directory + remoteInfo, err := ftp.Stat(remote) + if err != nil { + return fmt.Errorf("get: remote path %s: %w", remote, err) + } + + if remoteInfo.IsDir() { + recursive := flag.GetBool(ctx, "recursive") + if !recursive { + return fmt.Errorf("remote path %s is a directory. Use -R/--recursive flag to download directories", remote) + } + return runGetDir(ctx, ftp, remote, local) + } + rf, err := ftp.Open(remote) if err != nil { return fmt.Errorf("get: remote file %s: %w", remote, err) @@ -204,6 +261,307 @@ func runGet(ctx context.Context) error { return f.Sync() } +func runGetDir(ctx context.Context, ftp *sftp.Client, remote, local string) error { + // Check if target directory already exists + if _, err := os.Stat(local); err == nil { + return fmt.Errorf("directory %s already exists. 
flyctl sftp doesn't override existing directories for safety", local) + } + + // Create temporary ZIP file + tempZip, err := os.CreateTemp("", "flyctl-sftp-*.zip") + if err != nil { + return fmt.Errorf("create temporary zip file: %w", err) + } + defer os.Remove(tempZip.Name()) // Clean up temp file + defer tempZip.Close() + + z := zip.NewWriter(tempZip) + walker := ftp.Walk(remote) + totalBytes := int64(0) + + // Download all files into ZIP + for walker.Step() { + if err = walker.Err(); err != nil { + return fmt.Errorf("walk remote directory: %w", err) + } + + rfpath := walker.Path() + + inf, err := ftp.Stat(rfpath) + if err != nil { + fmt.Printf("warning: stat %s: %s\n", rfpath, err) + continue + } + + if inf.IsDir() { + continue + } + + rf, err := ftp.Open(rfpath) + if err != nil { + fmt.Printf("warning: open %s: %s\n", rfpath, err) + continue + } + + // Create relative path for ZIP entry + relPath := strings.TrimPrefix(rfpath, remote) + relPath = strings.TrimPrefix(relPath, "/") + if relPath == "" { + relPath = filepath.Base(rfpath) + } + + zf, err := z.Create(relPath) + if err != nil { + rf.Close() + fmt.Printf("warning: create zip entry %s: %s\n", relPath, err) + continue + } + + bytes, err := rf.WriteTo(zf) + if err != nil { + fmt.Printf("warning: write %s: %s (%d bytes written)\n", relPath, err, bytes) + } else { + fmt.Printf("downloaded %s (%d bytes)\n", relPath, bytes) + } + totalBytes += bytes + + rf.Close() + } + + // Close ZIP writer and temp file + z.Close() + tempZip.Close() + + // Extract ZIP to target directory + err = extractZip(tempZip.Name(), local) + if err != nil { + return fmt.Errorf("extract directory: %w", err) + } + + fmt.Printf("extracted %d bytes to %s/\n", totalBytes, local) + return nil +} + +func extractZip(src, dest string) error { + r, err := zip.OpenReader(src) + if err != nil { + return err + } + defer r.Close() + + // Create destination directory + err = os.MkdirAll(dest, 0755) + if err != nil { + return err + } + + // Extract files + for _, f := range r.File { + path := filepath.Join(dest, f.Name) + + // Security check: ensure path is within destination + if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) { + return fmt.Errorf("invalid file path: %s", f.Name) + } + + if f.FileInfo().IsDir() { + err = os.MkdirAll(path, f.FileInfo().Mode()) + if err != nil { + return err + } + continue + } + + // Create parent directories + err = os.MkdirAll(filepath.Dir(path), 0755) + if err != nil { + return err + } + + // Extract file + rc, err := f.Open() + if err != nil { + return err + } + + outFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.FileInfo().Mode()) + if err != nil { + rc.Close() + return err + } + + _, err = io.Copy(outFile, rc) + outFile.Close() + rc.Close() + + if err != nil { + return err + } + } + + return nil +} + +func runPut(ctx context.Context) error { + args := flag.Args(ctx) + + var local, remote string + + switch len(args) { + case 0: + fmt.Printf("put [remote-path]\n") + return nil + + case 1: + local = args[0] + remote = filepath.Base(local) + + default: + local = args[0] + remote = args[1] + } + + // Parse file mode + modeStr := flag.GetString(ctx, "mode") + mode, err := strconv.ParseInt(modeStr, 8, 16) + if err != nil { + return fmt.Errorf("invalid file mode '%s': %w", modeStr, err) + } + + // Check if local file exists and is readable + localInfo, err := os.Stat(local) + if err != nil { + return fmt.Errorf("local file %s: %w", local, err) + } + + ftp, err := newSFTPConnection(ctx) + if err != 
nil { + return err + } + + if localInfo.IsDir() { + recursive := flag.GetBool(ctx, "recursive") + if !recursive { + return fmt.Errorf("local path %s is a directory. Use -R/--recursive flag to upload directories", local) + } + return runPutDir(ctx, ftp, local, remote, fs.FileMode(mode)) + } + + // Check if remote file already exists + if _, err := ftp.Stat(remote); err == nil { + return fmt.Errorf("remote file %s already exists. flyctl sftp doesn't overwrite existing files for safety", remote) + } + + // Open local file + localFile, err := os.Open(local) + if err != nil { + return fmt.Errorf("open local file %s: %w", local, err) + } + defer localFile.Close() + + // Create remote file + remoteFile, err := ftp.OpenFile(remote, os.O_WRONLY|os.O_CREATE|os.O_EXCL) + if err != nil { + return fmt.Errorf("create remote file %s: %w", remote, err) + } + defer remoteFile.Close() + + // Copy file contents + bytes, err := remoteFile.ReadFrom(localFile) + if err != nil { + return fmt.Errorf("copy file: %w (%d bytes written)", err, bytes) + } + + // Set file permissions + if err = ftp.Chmod(remote, fs.FileMode(mode)); err != nil { + return fmt.Errorf("set file permissions: %w", err) + } + + fmt.Printf("%d bytes uploaded to %s\n", bytes, remote) + return nil +} + +func runPutDir(ctx context.Context, ftp *sftp.Client, localDir, remoteDir string, mode fs.FileMode) error { + // Check if remote directory already exists + if _, err := ftp.Stat(remoteDir); err == nil { + return fmt.Errorf("remote directory %s already exists. flyctl sftp doesn't overwrite existing directories for safety", remoteDir) + } + + totalBytes := int64(0) + totalFiles := 0 + + err := filepath.Walk(localDir, func(localPath string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("walk local directory: %w", err) + } + + // Create relative path for remote + relPath, err := filepath.Rel(localDir, localPath) + if err != nil { + return fmt.Errorf("get relative path: %w", err) + } + + remotePath := filepath.Join(remoteDir, relPath) + // Convert to forward slashes for remote paths + remotePath = strings.ReplaceAll(remotePath, "\\", "/") + + if info.IsDir() { + // Create remote directory + err := ftp.MkdirAll(remotePath) + if err != nil { + return fmt.Errorf("create remote directory %s: %w", remotePath, err) + } + fmt.Printf("created directory %s\n", remotePath) + } else { + // Create parent directories if they don't exist + remoteDir := filepath.Dir(remotePath) + remoteDir = strings.ReplaceAll(remoteDir, "\\", "/") + if remoteDir != "." 
{ + err := ftp.MkdirAll(remoteDir) + if err != nil { + return fmt.Errorf("create parent directory %s: %w", remoteDir, err) + } + } + + // Upload file + localFile, err := os.Open(localPath) + if err != nil { + return fmt.Errorf("open local file %s: %w", localPath, err) + } + defer localFile.Close() + + remoteFile, err := ftp.OpenFile(remotePath, os.O_WRONLY|os.O_CREATE|os.O_EXCL) + if err != nil { + return fmt.Errorf("create remote file %s: %w", remotePath, err) + } + defer remoteFile.Close() + + bytes, err := remoteFile.ReadFrom(localFile) + if err != nil { + return fmt.Errorf("copy file %s: %w (%d bytes written)", localPath, err, bytes) + } + + // Set file permissions + if err = ftp.Chmod(remotePath, mode); err != nil { + fmt.Printf("warning: set permissions for %s: %s\n", remotePath, err) + } + + fmt.Printf("uploaded %s (%d bytes)\n", remotePath, bytes) + totalBytes += bytes + totalFiles++ + } + + return nil + }) + + if err != nil { + return err + } + + fmt.Printf("%d files uploaded (%d bytes total) to %s\n", totalFiles, totalBytes, remoteDir) + return nil +} + var completer = readline.NewPrefixCompleter( readline.PcItem("ls"), readline.PcItem("cd"), diff --git a/internal/command/ssh/ssh_terminal.go b/internal/command/ssh/ssh_terminal.go index 551cdc95da..a129d50da9 100644 --- a/internal/command/ssh/ssh_terminal.go +++ b/internal/command/ssh/ssh_terminal.go @@ -115,7 +115,7 @@ func SSHConnect(p *SSHParams, addr string) error { TermEnv: "xterm", } - if err := sshClient.Shell(context.Background(), sessIO, p.Cmd); err != nil { + if err := sshClient.Shell(context.Background(), sessIO, p.Cmd, ""); err != nil { return errors.Wrap(err, "ssh shell") } diff --git a/internal/command/status/machines.go b/internal/command/status/machines.go index 76f49d186a..ac83dad65f 100644 --- a/internal/command/status/machines.go +++ b/internal/command/status/machines.go @@ -116,7 +116,7 @@ func RenderMachineStatus(ctx context.Context, app *fly.AppCompact, out io.Writer continue } - latestImage, err := client.GetLatestImageDetails(ctx, image) + latestImage, err := client.GetLatestImageDetails(ctx, image, machine.ImageVersion()) if err != nil { if strings.Contains(err.Error(), "Unknown repository") { unknownRepos[image] = true @@ -304,7 +304,7 @@ func renderPGStatus(ctx context.Context, app *fly.AppCompact, machines []*fly.Ma for _, machine := range machines { image := fmt.Sprintf("%s:%s", machine.ImageRef.Repository, machine.ImageRef.Tag) - latestImage, err := client.GetLatestImageDetails(ctx, image) + latestImage, err := client.GetLatestImageDetails(ctx, image, machine.ImageVersion()) if err != nil && strings.Contains(err.Error(), "Unknown repository") { continue diff --git a/internal/command/version/upgrade.go b/internal/command/version/upgrade.go index 810b65b892..1fe0384b1a 100644 --- a/internal/command/version/upgrade.go +++ b/internal/command/version/upgrade.go @@ -7,12 +7,12 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" "github.com/samber/lo" "github.com/spf13/cobra" "github.com/superfly/flyctl/terminal" - "golang.org/x/exp/slices" "github.com/superfly/flyctl/internal/buildinfo" "github.com/superfly/flyctl/internal/cache" diff --git a/internal/command/volumes/create.go b/internal/command/volumes/create.go index f56250d6c5..ee303a3efe 100644 --- a/internal/command/volumes/create.go +++ b/internal/command/volumes/create.go @@ -48,6 +48,11 @@ func newCreate() *cobra.Command { Default: 5, Description: "Snapshot retention in days", }, + flag.Bool{ + Name: "scheduled-snapshots", + Description: "Enable 
scheduled automatic snapshots", + Default: true, + }, flag.Bool{ Name: "no-encryption", Description: "Do not encrypt the volume contents. Volume contents are encrypted by default.", @@ -58,6 +63,11 @@ func newCreate() *cobra.Command { Description: "Place the volume in a separate hardware zone from existing volumes to help ensure availability", Default: true, }, + flag.Bool{ + Name: "unique-zone-app-wide", + Description: "Checks all volumes in app for unique zone handling, instead of only volumes with the same name (which is the default)", + Default: false, + }, flag.String{ Name: "snapshot-id", Description: "Create the volume from the specified snapshot", @@ -151,11 +161,17 @@ func runCreate(ctx context.Context) error { SizeGb: fly.Pointer(flag.GetInt(ctx, "size")), Encrypted: fly.Pointer(!flag.GetBool(ctx, "no-encryption")), RequireUniqueZone: fly.Pointer(flag.GetBool(ctx, "require-unique-zone")), + UniqueZoneAppWide: fly.Pointer(flag.GetBool(ctx, "unique-zone-app-wide")), SnapshotID: snapshotID, ComputeRequirements: computeRequirements, SnapshotRetention: fly.Pointer(flag.GetInt(ctx, "snapshot-retention")), FSType: fsType, } + + if flag.IsSpecified(ctx, "scheduled-snapshots") { + input.AutoBackupEnabled = fly.BoolPointer(flag.GetBool(ctx, "scheduled-snapshots")) + } + out := iostreams.FromContext(ctx).Out for i := 0; i < count; i++ { volume, err := flapsClient.CreateVolume(ctx, input) diff --git a/internal/command/volumes/fork.go b/internal/command/volumes/fork.go index 3464490df1..896a3e3877 100644 --- a/internal/command/volumes/fork.go +++ b/internal/command/volumes/fork.go @@ -46,6 +46,11 @@ func newFork() *cobra.Command { Description: "Place the volume in a separate hardware zone from existing volumes. This is the default.", Default: true, }, + flag.Bool{ + Name: "unique-zone-app-wide", + Description: "Checks all volumes in app for unique zone handling, instead of only volumes with the same name (which is the default)", + Default: false, + }, flag.String{ Name: "region", Shorthand: "r", @@ -116,6 +121,7 @@ func runFork(ctx context.Context) error { input := fly.CreateVolumeRequest{ Name: name, RequireUniqueZone: fly.Pointer(flag.GetBool(ctx, "require-unique-zone")), + UniqueZoneAppWide: fly.Pointer(flag.GetBool(ctx, "unique-zone-app-wide")), SourceVolumeID: &vol.ID, ComputeRequirements: computeRequirements, ComputeImage: attachedMachineImage, diff --git a/internal/command/volumes/lsvd/setup.go b/internal/command/volumes/lsvd/setup.go index af89fa8e99..76e6af7816 100644 --- a/internal/command/volumes/lsvd/setup.go +++ b/internal/command/volumes/lsvd/setup.go @@ -9,11 +9,12 @@ import ( "github.com/spf13/cobra" "github.com/superfly/flyctl/internal/appconfig" + "github.com/superfly/flyctl/internal/appsecrets" "github.com/superfly/flyctl/internal/command" extensions "github.com/superfly/flyctl/internal/command/extensions/core" "github.com/superfly/flyctl/internal/flag" + "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/flyerr" - "github.com/superfly/flyctl/internal/flyutil" "github.com/superfly/flyctl/internal/prompt" "github.com/superfly/flyctl/iostreams" ) @@ -32,10 +33,14 @@ func newSetup() *cobra.Command { func runSetup(ctx context.Context) error { appName := appconfig.NameFromContext(ctx) - client := flyutil.ClientFromContext(ctx) + ctx, flapsClient, app, err := flapsutil.SetClient(ctx, nil, appName) + if err != nil { + return err + } + io := iostreams.FromContext(ctx) - secrets, err := client.GetAppSecrets(ctx, appName) + secrets, err := 
appsecrets.List(ctx, flapsClient, app.Name) if err != nil { return err } @@ -200,18 +205,8 @@ func runSetup(ctx context.Context) error { deletedSecrets = append(deletedSecrets, "FLY_LSVD_MOUNT_POINT") } - if len(deletedSecrets) > 0 { - _, err = client.UnsetSecrets(ctx, appName, deletedSecrets) - if err != nil { - return err - } - } - - if len(newSecrets) > 0 { - _, err = client.SetSecrets(ctx, appName, newSecrets) - if err != nil { - return err - } + if err := appsecrets.Update(ctx, flapsClient, app.Name, newSecrets, deletedSecrets); err != nil { + return err } fmt.Fprintln( diff --git a/internal/command/volumes/snapshots/list.go b/internal/command/volumes/snapshots/list.go index 77dabfa75b..85a20366ad 100644 --- a/internal/command/volumes/snapshots/list.go +++ b/internal/command/volumes/snapshots/list.go @@ -8,8 +8,8 @@ import ( "time" "github.com/dustin/go-humanize" + "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - fly "github.com/superfly/fly-go" "github.com/superfly/fly-go/flaps" "github.com/superfly/flyctl/internal/appconfig" "github.com/superfly/flyctl/internal/command" @@ -58,14 +58,12 @@ func runList(ctx context.Context) error { volID := flag.FirstArg(ctx) appName := appconfig.NameFromContext(ctx) - var volState string if appName == "" { - n, s, err := client.GetAppNameStateFromVolume(ctx, volID) + n, err := client.GetAppNameFromVolume(ctx, volID) if err != nil { return fmt.Errorf("failed getting app name from volume: %w", err) } appName = *n - volState = *s } flapsClient, err := flapsutil.NewClientWithOptions(ctx, flaps.NewClientOpts{ @@ -75,17 +73,16 @@ func runList(ctx context.Context) error { return err } - var snapshots []fly.VolumeSnapshot - switch volState { - case "pending_destroy", "deleted": - snapshots, err = client.GetSnapshotsFromVolume(ctx, volID) - default: - snapshots, err = flapsClient.GetVolumeSnapshots(ctx, volID) - } + snapshots, err := flapsClient.GetVolumeSnapshots(ctx, volID) if err != nil { return fmt.Errorf("failed retrieving snapshots: %w", err) } + // sort snapshots from oldest to newest + sort.Slice(snapshots, func(i, j int) bool { + return snapshots[i].CreatedAt.Before(snapshots[j].CreatedAt) + }) + if cfg.JSONOutput { return render.JSON(io.Out, snapshots) } @@ -95,12 +92,8 @@ func runList(ctx context.Context) error { return nil } - // sort snapshots from newest to oldest - sort.Slice(snapshots, func(i, j int) bool { - return snapshots[i].CreatedAt.After(snapshots[j].CreatedAt) - }) - rows := make([][]string, 0, len(snapshots)) + var totalStoredSize uint64 for _, snapshot := range snapshots { id := snapshot.ID if id == "" { @@ -111,14 +104,33 @@ func runList(ctx context.Context) error { if snapshot.RetentionDays != nil { retentionDays = strconv.Itoa(*snapshot.RetentionDays) } + + storedSize := humanize.IBytes(uint64(snapshot.Size)) + volSize := humanize.IBytes(uint64(snapshot.VolumeSize)) + totalStoredSize += uint64(snapshot.Size) + rows = append(rows, []string{ id, snapshot.Status, - strconv.Itoa(snapshot.Size), + storedSize, + volSize, timeToString(snapshot.CreatedAt), retentionDays, }) } - return render.Table(io.Out, "Snapshots", rows, "ID", "Status", "Size", "Created At", "Retention Days") + table := render.NewTable(io.Out, "Snapshots", rows, "ID", "Status", "Stored Size", "Vol Size", "Created At", "Retention Days") + table.SetColumnAlignment([]int{ + tablewriter.ALIGN_DEFAULT, // ID + tablewriter.ALIGN_DEFAULT, // Status + tablewriter.ALIGN_RIGHT, // Stored Size + tablewriter.ALIGN_RIGHT, // Vol Size + tablewriter.ALIGN_DEFAULT, // 
Created At + tablewriter.ALIGN_RIGHT, // Retention Days + }) + table.Render() + + fmt.Fprintf(io.Out, "\nTotal stored size: %s\n", humanize.IBytes(totalStoredSize)) + + return nil } diff --git a/internal/command/wireguard/root.go b/internal/command/wireguard/root.go index 4d21a46e49..8cdd71f73f 100644 --- a/internal/command/wireguard/root.go +++ b/internal/command/wireguard/root.go @@ -49,6 +49,12 @@ func newWireguardCreate() *cobra.Command { command.RequireSession, ) cmd.Args = cobra.MaximumNArgs(4) + flag.Add(cmd, + flag.String{ + Name: "network", + Description: "Custom network name", + }, + ) return cmd } diff --git a/internal/command/wireguard/wireguard.go b/internal/command/wireguard/wireguard.go index 1f355661aa..76cb97eb8d 100644 --- a/internal/command/wireguard/wireguard.go +++ b/internal/command/wireguard/wireguard.go @@ -141,8 +141,7 @@ func runWireguardCreate(ctx context.Context) error { name = args[2] } - // TODO: allow custom network - network := "" + network := flag.GetString(ctx, "network") state, err := wireguard.Create(apiClient, org, region, name, network, "static") if err != nil { diff --git a/internal/config/config.go b/internal/config/config.go index ef454f13f0..018957db99 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -31,6 +31,7 @@ const ( SendMetricsFileKey = "send_metrics" SyntheticsAgentFileKey = "synthetics_agent" AutoUpdateFileKey = "auto_update" + AppSecretsMinverFileKey = "app_secrets_minvers" WireGuardStateFileKey = "wire_guard_state" WireGuardWebsocketsFileKey = "wire_guard_websockets" APITokenEnvKey = "FLY_API_TOKEN" @@ -98,6 +99,9 @@ type Config struct { // LocalOnly denotes whether the user wants only local operations. LocalOnly bool + // DisableManagedBuilders will make docker daemon type never be managed + DisableManagedBuilders bool + // Tokens is the user's authentication token(s). They are used differently // depending on where they need to be sent. 
Tokens *tokens.Tokens @@ -167,15 +171,17 @@ func (cfg *Config) applyFile(path string) (err error) { defer cfg.mu.Unlock() var w struct { - AccessToken string `yaml:"access_token"` - MetricsToken string `yaml:"metrics_token"` - SendMetrics bool `yaml:"send_metrics"` - AutoUpdate bool `yaml:"auto_update"` - SyntheticsAgent bool `yaml:"synthetics_agent"` + AccessToken string `yaml:"access_token"` + MetricsToken string `yaml:"metrics_token"` + SendMetrics bool `yaml:"send_metrics"` + AutoUpdate bool `yaml:"auto_update"` + SyntheticsAgent bool `yaml:"synthetics_agent"` + DisableManagedBuilders bool `yaml:"disable_managed_builders"` } w.SendMetrics = true w.AutoUpdate = true w.SyntheticsAgent = true + w.DisableManagedBuilders = false if err = unmarshal(path, &w); err == nil { cfg.Tokens = tokens.ParseFromFile(w.AccessToken, path) @@ -183,6 +189,7 @@ func (cfg *Config) applyFile(path string) (err error) { cfg.SendMetrics = w.SendMetrics cfg.AutoUpdate = w.AutoUpdate cfg.SyntheticsAgent = w.SyntheticsAgent + cfg.DisableManagedBuilders = w.DisableManagedBuilders } return diff --git a/internal/config/file.go b/internal/config/file.go index c17aa8c37a..77c678e0ea 100644 --- a/internal/config/file.go +++ b/internal/config/file.go @@ -77,13 +77,22 @@ func SetWireGuardWebsocketsEnabled(path string, enabled bool) error { }) } +type AppSecretsMinvers map[string]uint64 + +func SetAppSecretsMinvers(path string, minvers AppSecretsMinvers) error { + return set(path, map[string]interface{}{ + AppSecretsMinverFileKey: minvers, + }) +} + // Clear clears the access token, metrics token, and wireguard-related keys of the configuration // file found at path. func Clear(path string) (err error) { return set(path, map[string]interface{}{ - AccessTokenFileKey: "", - MetricsTokenFileKey: "", - WireGuardStateFileKey: map[string]interface{}{}, + AccessTokenFileKey: "", + MetricsTokenFileKey: "", + WireGuardStateFileKey: map[string]interface{}{}, + AppSecretsMinverFileKey: AppSecretsMinvers{}, }) } diff --git a/internal/config/machine.go b/internal/config/machine.go new file mode 100644 index 0000000000..85196210bd --- /dev/null +++ b/internal/config/machine.go @@ -0,0 +1,144 @@ +package config + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "os" + "strings" + + "github.com/superfly/fly-go" +) + +func ParseConfig(config *fly.MachineConfig, mc string) error { + var buf []byte + switch { + case strings.HasPrefix(mc, "{"): + buf = []byte(mc) + case strings.HasSuffix(mc, ".json"): + fo, err := os.Open(mc) + if err != nil { + return err + } + buf, err = io.ReadAll(fo) + if err != nil { + return err + } + default: + return fmt.Errorf("invalid machine config source: %q", mc) + } + + if err := json.Unmarshal(buf, config); err != nil { + return fmt.Errorf("invalid machine config %q: %w", mc, err) + } + + if err := readLocalFiles(config, buf); err != nil { + return err + } + + return nil +} + +// readLocalFiles reads local files from the machine config and inserts their content into the config. 
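+// For example (an illustrative sketch; the paths are hypothetical), a machine
+// config containing
+//
+//	{"files": [{"guest_path": "/etc/app/settings.json", "local_path": "./settings.json"}]}
+//
+// has ./settings.json read from disk and embedded as a base64-encoded raw_value
+// before the config is used.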
+func readLocalFiles(config *fly.MachineConfig, buf []byte) error { + clean := true + + if config.Files != nil { + for _, file := range config.Files { + if file.RawValue == nil && file.SecretName == nil { + clean = false + } + } + } + + for _, container := range config.Containers { + if container.Files != nil { + for _, file := range container.Files { + if file.RawValue == nil && file.SecretName == nil { + clean = false + } + } + } + } + + if clean { + return nil + } + + // File represents a file configuration within a container + type LocalFile struct { + GuestPath string `json:"guest_path"` + LocalPath string `json:"local_path"` + } + + // Container represents a container configuration + type LocalContainer struct { + Name string `json:"name"` + Files []LocalFile `json:"files"` + } + + // Config represents the overall CLI configuration + type LocalConfig struct { + Files []LocalFile `json:"files"` + Containers []LocalContainer `json:"containers"` + } + + // Read the JSON file + var localConf LocalConfig + if err := json.Unmarshal(buf, &localConf); err != nil { + return fmt.Errorf("invalid machine config %s: %w", string(buf), err) + } + + if config.Files != nil { + for _, file := range config.Files { + if file.RawValue == nil && file.SecretName == nil { + for _, localFile := range localConf.Files { + if file.GuestPath == localFile.GuestPath { + if localFile.LocalPath == "" { + continue + } + + content, err := os.ReadFile(localFile.LocalPath) + if err != nil { + return fmt.Errorf("failed to read file at %s: %w", localFile.LocalPath, err) + } + + encodedContent := base64.StdEncoding.EncodeToString(content) + file.RawValue = &encodedContent + } + } + } + } + } + + for _, container := range config.Containers { + if container.Files != nil { + for _, file := range container.Files { + if file.RawValue == nil && file.SecretName == nil { + for _, localContainer := range localConf.Containers { + if container.Name == localContainer.Name { + for _, localFile := range localContainer.Files { + if file.GuestPath == localFile.GuestPath { + if localFile.LocalPath == "" { + continue + } + + content, err := os.ReadFile(localFile.LocalPath) + if err != nil { + return fmt.Errorf("failed to read file at %s: %w", localFile.LocalPath, err) + } + + encodedContent := base64.StdEncoding.EncodeToString(content) + file.RawValue = &encodedContent + } + } + } + } + } + } + } + } + + return nil +} diff --git a/internal/config/tokens.go b/internal/config/tokens.go index 6d67499680..ac68a498e4 100644 --- a/internal/config/tokens.go +++ b/internal/config/tokens.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "maps" "slices" "strconv" + "strings" "sync" "time" @@ -17,7 +19,6 @@ import ( "github.com/superfly/flyctl/internal/task" "github.com/superfly/macaroon" "github.com/superfly/macaroon/flyio" - "golang.org/x/exp/maps" ) // UserURLCallback is a function that opens a URL in the user's browser. This is @@ -42,7 +43,7 @@ func MonitorTokens(monitorCtx context.Context, t *tokens.Tokens, uucb UserURLCal log.Debugf("failed to fetch missing tokens org tokens: %s", err) } - updated2, err := refreshDischargeTokens(monitorCtx, t, uucb) + updated2, err := refreshDischargeTokens(monitorCtx, t, uucb, 30*time.Second) if err != nil { log.Debugf("failed to update discharge tokens: %s", err) } @@ -141,7 +142,7 @@ func keepConfigTokensFresh(ctx context.Context, m *sync.Mutex, t *tokens.Tokens, // don't continue. 
might have been partial success } - updated2, err := refreshDischargeTokens(ctx, localCopy, uucb) + updated2, err := refreshDischargeTokens(ctx, localCopy, uucb, 2*time.Minute) if err != nil { logger.Debugf("failed to update discharge tokens: %s", err) // don't continue. might have been partial success @@ -181,11 +182,21 @@ func keepConfigTokensFresh(ctx context.Context, m *sync.Mutex, t *tokens.Tokens, // the user's browser. Set the UserURLCallback package variable if you want to // support this. // -// Don't call this when other goroutines might also be accessing t. -func refreshDischargeTokens(ctx context.Context, t *tokens.Tokens, uucb UserURLCallback) (bool, error) { - updateOpts := []tokens.UpdateOption{tokens.WithDebugger(logger.FromContext(ctx))} +// Don't call this when other goroutines might also be accessing it. +func refreshDischargeTokens(ctx context.Context, t *tokens.Tokens, uucb UserURLCallback, advancePrune time.Duration) (bool, error) { + updateOpts := []tokens.UpdateOption{ + tokens.WithDebugger(logger.FromContext(ctx)), + tokens.WithAdvancePrune(advancePrune), + } if uucb != nil { + // Update without UserURLCallback to fetch tokens in parallel. + updated, err := t.Update(ctx, updateOpts...) + if err == nil || !strings.Contains(err.Error(), "missing user-url callback") { + return updated, err + } + + // Retry with UserURLCallback if we received a 'missing user-url callback' error. updateOpts = append(updateOpts, tokens.WithUserURLCallback(uucb)) } @@ -277,7 +288,7 @@ func doFetchOrgTokens(ctx context.Context, t *tokens.Tokens, fetchOrgs orgFetche defer wgLock.Unlock() macToks = append(macToks, m) } - for _, graphID := range maps.Values(graphIDByNumericID) { + for graphID := range maps.Values(graphIDByNumericID) { graphID := graphID wg.Add(1) diff --git a/internal/containerconfig/compose.go b/internal/containerconfig/compose.go new file mode 100644 index 0000000000..4e87731219 --- /dev/null +++ b/internal/containerconfig/compose.go @@ -0,0 +1,408 @@ +package containerconfig + +import ( + "encoding/base64" + "fmt" + "os" + "path/filepath" + "strings" + + fly "github.com/superfly/fly-go" + "gopkg.in/yaml.v3" +) + +// ComposeService represents a service definition in Docker Compose +type ComposeService struct { + Image string `yaml:"image"` + Build interface{} `yaml:"build"` + Environment map[string]string `yaml:"environment"` + Volumes []string `yaml:"volumes"` + Ports []string `yaml:"ports"` + Command interface{} `yaml:"command"` + Entrypoint interface{} `yaml:"entrypoint"` + WorkingDir string `yaml:"working_dir"` + User string `yaml:"user"` + Restart string `yaml:"restart"` + Configs []interface{} `yaml:"configs"` + Secrets []interface{} `yaml:"secrets"` + Deploy map[string]interface{} `yaml:"deploy"` + DependsOn interface{} `yaml:"depends_on"` + Healthcheck *ComposeHealthcheck `yaml:"healthcheck"` + Extra map[string]interface{} `yaml:",inline"` +} + +// ComposeDependency represents a service dependency with conditions +type ComposeDependency struct { + Condition string `yaml:"condition"` + Required bool `yaml:"required"` + Restart bool `yaml:"restart"` +} + +// ServiceDependencies represents parsed dependencies for a service +type ServiceDependencies struct { + Dependencies map[string]ComposeDependency +} + +// DependencyCondition constants +const ( + DependencyConditionStarted = "service_started" + DependencyConditionHealthy = "service_healthy" + DependencyConditionCompletedSuccessfully = "service_completed_successfully" +) + +// ComposeHealthcheck represents a health 
check configuration +type ComposeHealthcheck struct { + Test interface{} `yaml:"test"` + Interval string `yaml:"interval"` + Timeout string `yaml:"timeout"` + Retries int `yaml:"retries"` + StartPeriod string `yaml:"start_period"` +} + +// ComposeFile represents a Docker Compose file structure +type ComposeFile struct { + Version string `yaml:"version"` + Services map[string]ComposeService `yaml:"services"` + Volumes map[string]interface{} `yaml:"volumes"` + Networks map[string]interface{} `yaml:"networks"` + Configs map[string]interface{} `yaml:"configs"` + Secrets map[string]interface{} `yaml:"secrets"` +} + +// parseComposeFile reads and parses a Docker Compose YAML file +func parseComposeFile(composePath string) (*ComposeFile, error) { + data, err := os.ReadFile(composePath) + if err != nil { + return nil, fmt.Errorf("failed to read compose file: %w", err) + } + + var compose ComposeFile + if err := yaml.Unmarshal(data, &compose); err != nil { + return nil, fmt.Errorf("failed to parse compose file: %w", err) + } + + return &compose, nil +} + +// parseDependsOn parses both short and long syntax depends_on +func parseDependsOn(dependsOn interface{}) (ServiceDependencies, error) { + deps := ServiceDependencies{ + Dependencies: make(map[string]ComposeDependency), + } + + if dependsOn == nil { + return deps, nil + } + + switch v := dependsOn.(type) { + case []interface{}: + // Short syntax: depends_on: [db, redis] + for _, dep := range v { + if serviceName, ok := dep.(string); ok { + deps.Dependencies[serviceName] = ComposeDependency{ + Condition: DependencyConditionStarted, + Required: true, + Restart: false, + } + } + } + case map[string]interface{}: + // Long syntax: depends_on: { db: { condition: service_healthy } } + for serviceName, depConfig := range v { + dependency := ComposeDependency{ + Condition: DependencyConditionStarted, + Required: true, + Restart: false, + } + + if config, ok := depConfig.(map[string]interface{}); ok { + if condition, exists := config["condition"]; exists { + if condStr, ok := condition.(string); ok { + dependency.Condition = condStr + } + } + if required, exists := config["required"]; exists { + if reqBool, ok := required.(bool); ok { + dependency.Required = reqBool + } + } + if restart, exists := config["restart"]; exists { + if restartBool, ok := restart.(bool); ok { + dependency.Restart = restartBool + } + } + } + + deps.Dependencies[serviceName] = dependency + } + default: + return deps, fmt.Errorf("invalid depends_on format") + } + + return deps, nil +} + +// parseVolume parses a Docker Compose volume string +// Format: [HOST:]CONTAINER[:ro|:rw] +func parseVolume(volume string) (hostPath, containerPath string, readOnly bool) { + parts := strings.Split(volume, ":") + + switch len(parts) { + case 1: + // Just container path (anonymous volume) + return "", parts[0], false + case 2: + // Could be HOST:CONTAINER or CONTAINER:ro + if parts[1] == "ro" || parts[1] == "rw" { + return "", parts[0], parts[1] == "ro" + } + return parts[0], parts[1], false + case 3: + // HOST:CONTAINER:ro/rw + return parts[0], parts[1], parts[2] == "ro" + default: + // Invalid format, return container path from first part + return "", parts[0], false + } +} + +// convertHealthcheck converts a compose healthcheck to Fly healthcheck +func convertHealthcheck(composeHC *ComposeHealthcheck) *fly.ContainerHealthcheck { + if composeHC == nil { + return nil + } + + hc := &fly.ContainerHealthcheck{} + + // Parse test command + var cmd []string + switch test := composeHC.Test.(type) { + 
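+	// Compose accepts the test either as a shell string or as an exec-style list,
+	// e.g. (illustrative):
+	//   test: curl -f http://localhost/ || exit 1
+	//   test: ["CMD", "curl", "-f", "http://localhost/"]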
case string: + // HEALTHCHECK test + cmd = []string{test} + case []interface{}: + // ["CMD", "wget", "--spider", "localhost:80"] + for i, t := range test { + if str, ok := t.(string); ok { + // Skip "CMD" or "CMD-SHELL" prefix + if i == 0 && (str == "CMD" || str == "CMD-SHELL") { + continue + } + cmd = append(cmd, str) + } + } + } + + // Set up exec healthcheck + if len(cmd) > 0 { + hc.ContainerHealthcheckType = fly.ContainerHealthcheckType{ + Exec: &fly.ExecHealthcheck{ + Command: cmd, + }, + } + } + + // Parse durations - for now just use defaults + // In a real implementation, you'd parse "30s" -> 30, etc. + if composeHC.Interval != "" { + hc.Interval = 30 // Default 30s + } + if composeHC.Timeout != "" { + hc.Timeout = 10 // Default 10s + } + if composeHC.Retries > 0 { + hc.FailureThreshold = int32(composeHC.Retries) + } + + return hc +} + +// composeToMachineConfig converts a Docker Compose file to Fly machine configuration +// Always uses containers for compose files, regardless of service count +func composeToMachineConfig(mConfig *fly.MachineConfig, compose *ComposeFile, composePath string) error { + if len(compose.Services) == 0 { + return fmt.Errorf("no services defined in compose file") + } + + // Initialize empty slices/maps if they don't exist + if mConfig.Containers == nil { + mConfig.Containers = []*fly.ContainerConfig{} + } + if mConfig.Restart == nil { + mConfig.Restart = &fly.MachineRestart{} + } + + // Parse dependencies for all services + serviceDependencies := make(map[string]ServiceDependencies) + for serviceName, service := range compose.Services { + deps, err := parseDependsOn(service.DependsOn) + if err != nil { + return fmt.Errorf("failed to parse dependencies for service '%s': %w", serviceName, err) + } + serviceDependencies[serviceName] = deps + } + + // Create containers for all services + containers := make([]*fly.ContainerConfig, 0, len(compose.Services)) + + // Check that only one service specifies build + buildServiceCount := 0 + for _, service := range compose.Services { + if service.Build != nil { + buildServiceCount++ + } + } + if buildServiceCount > 1 { + return fmt.Errorf("only one service can specify build, found %d services with build", buildServiceCount) + } + + // Process all services as containers + for serviceName, service := range compose.Services { + container := &fly.ContainerConfig{ + Name: serviceName, + } + + // Set image + if service.Build != nil { + // Service with build section uses "." as image + container.Image = "." 
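+			// e.g. (illustrative) a compose service declared as:
+			//   app:
+			//     build: .
+			// reaches this branch and ends up with "." as its container image.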
+ } else if service.Image != "" { + container.Image = service.Image + } else { + // Services without build must specify image + return fmt.Errorf("service '%s' must specify either 'image' or 'build'", serviceName) + } + + // Handle environment variables + if len(service.Environment) > 0 { + container.ExtraEnv = make(map[string]string) + for k, v := range service.Environment { + container.ExtraEnv[k] = v + } + } + + // Handle compose-specific entrypoint/command if specified + if service.Entrypoint != nil { + switch ep := service.Entrypoint.(type) { + case string: + container.EntrypointOverride = []string{ep} + case []interface{}: + epSlice := make([]string, 0, len(ep)) + for _, e := range ep { + if str, ok := e.(string); ok { + epSlice = append(epSlice, str) + } + } + container.EntrypointOverride = epSlice + } + } + + if service.Command != nil { + switch cmd := service.Command.(type) { + case string: + container.CmdOverride = []string{cmd} + case []interface{}: + cmdSlice := make([]string, 0, len(cmd)) + for _, c := range cmd { + if str, ok := c.(string); ok { + cmdSlice = append(cmdSlice, str) + } + } + container.CmdOverride = cmdSlice + } + } + + // If no entrypoint/command specified in compose, let container use image defaults + + // Handle user + if service.User != "" { + container.UserOverride = service.User + } + + // Start with empty files list + files := []*fly.File{} + + // Handle volume mounts + for _, vol := range service.Volumes { + hostPath, containerPath, _ := parseVolume(vol) + if hostPath != "" { + // Make host path absolute if relative + if !filepath.IsAbs(hostPath) { + hostPath = filepath.Join(filepath.Dir(composePath), hostPath) + } + + // Read the file content + content, err := os.ReadFile(hostPath) + if err != nil { + // Log warning but continue + fmt.Printf("Warning: Could not read volume file %s: %v\n", hostPath, err) + continue + } + + // Add file to container + encodedContent := base64.StdEncoding.EncodeToString(content) + + files = append(files, &fly.File{ + GuestPath: containerPath, + RawValue: &encodedContent, + }) + } + } + + container.Files = files + + // Handle health checks + if service.Healthcheck != nil { + healthcheck := convertHealthcheck(service.Healthcheck) + if healthcheck != nil { + container.Healthchecks = []fly.ContainerHealthcheck{*healthcheck} + } + } + + // Handle dependencies + if deps, exists := serviceDependencies[serviceName]; exists && len(deps.Dependencies) > 0 { + var containerDeps []fly.ContainerDependency + for depName, dep := range deps.Dependencies { + var condition fly.ContainerDependencyCondition + switch dep.Condition { + case DependencyConditionStarted: + condition = fly.Started + case DependencyConditionHealthy: + condition = fly.Healthy + case DependencyConditionCompletedSuccessfully: + condition = fly.ExitedSuccessfully + default: + condition = fly.Started // default fallback + } + + containerDeps = append(containerDeps, fly.ContainerDependency{ + Name: depName, + Condition: condition, + }) + } + container.DependsOn = containerDeps + } + + containers = append(containers, container) + } + + mConfig.Containers = containers + + // Clear services - containers handle their own networking + mConfig.Services = nil + + // Clear the main image - containers have their own images + mConfig.Image = "" + + return nil +} + +// ParseComposeFileWithPath parses a Docker Compose file and converts it to machine config +func ParseComposeFileWithPath(mConfig *fly.MachineConfig, composePath string) error { + compose, err := 
parseComposeFile(composePath) + if err != nil { + return err + } + + return composeToMachineConfig(mConfig, compose, composePath) +} diff --git a/internal/containerconfig/compose_test.go b/internal/containerconfig/compose_test.go new file mode 100644 index 0000000000..709adfe12f --- /dev/null +++ b/internal/containerconfig/compose_test.go @@ -0,0 +1,541 @@ +package containerconfig + +import ( + "encoding/base64" + "os" + "path/filepath" + "strings" + "testing" + + fly "github.com/superfly/fly-go" +) + +func TestParseComposeFileWithPath(t *testing.T) { + // Create a temporary compose file for testing + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + web: + image: nginx:latest + ports: + - "80:80" + environment: + ENV_VAR: value + restart: always +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + // Verify the parsed configuration - now always uses containers + // Main image should be empty when using containers + if mConfig.Image != "" { + t.Errorf("Expected main image to be empty, got '%s'", mConfig.Image) + } + + // Should have one container + if len(mConfig.Containers) != 1 { + t.Errorf("Expected 1 container, got %d", len(mConfig.Containers)) + } + + // Check the container details + container := mConfig.Containers[0] + if container.Name != "web" { + t.Errorf("Expected container name 'web', got '%s'", container.Name) + } + + if container.Image != "nginx:latest" { + t.Errorf("Expected container image 'nginx:latest', got '%s'", container.Image) + } + + if container.ExtraEnv["ENV_VAR"] != "value" { + t.Errorf("Expected ENV_VAR='value', got '%s'", container.ExtraEnv["ENV_VAR"]) + } +} + +func TestParseComposeFileMultiService(t *testing.T) { + // Create a temporary compose file with multiple services + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + app: + image: myapp:latest + environment: + APP_ENV: production + command: ["./start.sh"] + db: + image: postgres:14 + environment: + POSTGRES_PASSWORD: secret + cache: + image: redis:alpine +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file - should succeed with containers + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse multi-service compose file: %v", err) + } + + // Verify the main image is empty when using containers + if mConfig.Image != "" { + t.Errorf("Expected main image to be empty, got '%s'", mConfig.Image) + } + + // Verify containers were created + if len(mConfig.Containers) != 3 { + t.Errorf("Expected 3 containers, got %d", len(mConfig.Containers)) + } + + // Check container details + containerNames := make(map[string]bool) + for _, container := range mConfig.Containers { + containerNames[container.Name] = true + + switch container.Name { + case "app": + if container.Image != "myapp:latest" { + t.Errorf("Expected app container image 'myapp:latest', got '%s'", container.Image) + } + if container.ExtraEnv["APP_ENV"] != "production" { + t.Errorf("Expected APP_ENV='production', got '%s'", 
container.ExtraEnv["APP_ENV"]) + } + if len(container.CmdOverride) == 0 || container.CmdOverride[0] != "./start.sh" { + t.Errorf("Expected command './start.sh', got %v", container.CmdOverride) + } + case "db": + if container.Image != "postgres:14" { + t.Errorf("Expected db container image 'postgres:14', got '%s'", container.Image) + } + if container.ExtraEnv["POSTGRES_PASSWORD"] != "secret" { + t.Errorf("Expected POSTGRES_PASSWORD='secret', got '%s'", container.ExtraEnv["POSTGRES_PASSWORD"]) + } + case "cache": + if container.Image != "redis:alpine" { + t.Errorf("Expected cache container image 'redis:alpine', got '%s'", container.Image) + } + } + } + + // Verify all expected containers exist + for _, name := range []string{"app", "db", "cache"} { + if !containerNames[name] { + t.Errorf("Expected container '%s' not found", name) + } + } +} + +func TestParseComposeFileMultiServiceNoApp(t *testing.T) { + // Create a compose file without an "app" service + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + web: + image: nginx:latest + backend: + image: api:latest +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + // Main image should be empty when using containers + if mConfig.Image != "" { + t.Errorf("Expected main image to be empty, got '%s'", mConfig.Image) + } + + // Verify containers were created + if len(mConfig.Containers) != 2 { + t.Errorf("Expected 2 containers, got %d", len(mConfig.Containers)) + } +} + +func TestComposeVolumeAndHealthcheck(t *testing.T) { + // Create a compose file with volumes and health checks + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + // Copy nginx.conf to temp directory + nginxConf := `server { + listen 80; + location / { + proxy_pass http://echo:80; + } +}` + nginxPath := filepath.Join(tmpDir, "nginx.conf") + if err := os.WriteFile(nginxPath, []byte(nginxConf), 0644); err != nil { + t.Fatalf("Failed to write nginx.conf: %v", err) + } + + composeContent := `version: "3.8" +services: + nginx: + image: nginx:latest + volumes: + - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro + echo: + image: ealen/echo-server + healthcheck: + test: ["CMD", "wget", "--spider", "localhost:80"] + interval: 30s + timeout: 10s + retries: 3 +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file: %v", err) + } + + // Find the nginx container + var nginxContainer *fly.ContainerConfig + var echoContainer *fly.ContainerConfig + for _, container := range mConfig.Containers { + if container.Name == "nginx" { + nginxContainer = container + } else if container.Name == "echo" { + echoContainer = container + } + } + + if nginxContainer == nil { + t.Fatal("nginx container not found") + } + if echoContainer == nil { + t.Fatal("echo container not found") + } + + // Check nginx has the volume mounted + nginxConfFound := false + for _, file := range nginxContainer.Files { + if file.GuestPath == "/etc/nginx/conf.d/default.conf" { + 
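+			// The ./nginx.conf volume from the compose file should have been embedded
+			// as a base64-encoded file at this guest path by composeToMachineConfig.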
nginxConfFound = true + // Check content + if file.RawValue != nil { + decoded, err := base64.StdEncoding.DecodeString(*file.RawValue) + if err != nil { + t.Errorf("Failed to decode nginx.conf content: %v", err) + } else if !strings.Contains(string(decoded), "proxy_pass http://echo:80") { + t.Errorf("nginx.conf should contain proxy_pass directive") + } + } + break + } + } + if !nginxConfFound { + t.Error("nginx.conf volume mount not found") + } + + // Check echo container has health check + if len(echoContainer.Healthchecks) == 0 { + t.Error("echo container should have health check") + } else { + hc := echoContainer.Healthchecks[0] + if hc.Exec == nil { + t.Error("Expected exec health check") + } else { + // Command should be ["wget", "--spider", "localhost:80"] (without CMD) + expectedCmd := []string{"wget", "--spider", "localhost:80"} + if len(hc.Exec.Command) != len(expectedCmd) { + t.Errorf("Expected health check command %v, got %v", expectedCmd, hc.Exec.Command) + } else { + for i, cmd := range expectedCmd { + if i < len(hc.Exec.Command) && hc.Exec.Command[i] != cmd { + t.Errorf("Expected health check command[%d] '%s', got '%s'", i, cmd, hc.Exec.Command[i]) + } + } + } + } + // Check intervals + if hc.Interval != 30 { + t.Errorf("Expected interval 30, got %d", hc.Interval) + } + if hc.Timeout != 10 { + t.Errorf("Expected timeout 10, got %d", hc.Timeout) + } + if hc.FailureThreshold != 3 { + t.Errorf("Expected failure threshold 3, got %d", hc.FailureThreshold) + } + } +} + +func TestParseComposeFileWithBuild(t *testing.T) { + // Create a compose file with build section + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + app: + build: . + environment: + APP_ENV: production + db: + image: postgres:14 +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file with build: %v", err) + } + + // Verify containers were created + if len(mConfig.Containers) != 2 { + t.Errorf("Expected 2 containers, got %d", len(mConfig.Containers)) + } + + // Find containers + var appContainer, dbContainer *fly.ContainerConfig + for _, container := range mConfig.Containers { + switch container.Name { + case "app": + appContainer = container + case "db": + dbContainer = container + } + } + + if appContainer == nil { + t.Fatal("app container not found") + } + if dbContainer == nil { + t.Fatal("db container not found") + } + + // Service with build should have image "." + if appContainer.Image != "." { + t.Errorf("Expected app container image '.', got '%s'", appContainer.Image) + } + + // Service without build should have its specified image + if dbContainer.Image != "postgres:14" { + t.Errorf("Expected db container image 'postgres:14', got '%s'", dbContainer.Image) + } +} + +func TestParseComposeFileMultipleBuildError(t *testing.T) { + // Create a compose file with multiple build sections + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + app1: + build: . 
+ app2: + build: + context: ./app2 +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse should fail + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err == nil { + t.Fatal("Expected error for multiple services with build, got nil") + } + + if !strings.Contains(err.Error(), "only one service can specify build") { + t.Errorf("Expected error about multiple build services, got: %v", err) + } +} + +func TestParseComposeFileMissingImageAndBuild(t *testing.T) { + // Create a compose file with a service that has neither image nor build + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + app: + environment: + APP_ENV: production +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse should fail + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err == nil { + t.Fatal("Expected error for service without image or build, got nil") + } + + if !strings.Contains(err.Error(), "must specify either 'image' or 'build'") { + t.Errorf("Expected error about missing image or build, got: %v", err) + } +} + +func TestParseComposeFileWithDependencies(t *testing.T) { + // Create a compose file with dependencies + tmpDir := t.TempDir() + composePath := filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3.8" +services: + nginx: + image: nginx:latest + depends_on: + echo: + condition: service_healthy + echo: + build: . + healthcheck: + test: ["CMD", "wget", "-q0-", "localhost:80"] +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file with dependencies: %v", err) + } + + // Verify containers were created + if len(mConfig.Containers) != 2 { + t.Errorf("Expected 2 containers, got %d", len(mConfig.Containers)) + } + + // Find containers + var nginxContainer, echoContainer *fly.ContainerConfig + for _, container := range mConfig.Containers { + switch container.Name { + case "nginx": + nginxContainer = container + case "echo": + echoContainer = container + } + } + + if nginxContainer == nil { + t.Fatal("nginx container not found") + } + if echoContainer == nil { + t.Fatal("echo container not found") + } + + // Check nginx dependencies + if len(nginxContainer.DependsOn) != 1 { + t.Errorf("Expected nginx to have 1 dependency, got %d", len(nginxContainer.DependsOn)) + } else { + dep := nginxContainer.DependsOn[0] + if dep.Name != "echo" { + t.Errorf("Expected dependency on 'echo', got '%s'", dep.Name) + } + if dep.Condition != fly.Healthy { + t.Errorf("Expected condition 'healthy', got '%s'", dep.Condition) + } + } + + // Check echo has no dependencies + if len(echoContainer.DependsOn) != 0 { + t.Errorf("Expected echo to have no dependencies, got %d", len(echoContainer.DependsOn)) + } + + // Check echo has health check + if len(echoContainer.Healthchecks) == 0 { + t.Error("Expected echo to have health check") + } +} + +func TestParseComposeFileShortDependencySyntax(t *testing.T) { + // Create a compose file with short dependency syntax + tmpDir := t.TempDir() + composePath := 
filepath.Join(tmpDir, "compose.yml") + + composeContent := `version: "3" +services: + web: + image: nginx:latest + depends_on: + - db + - redis + db: + image: postgres:14 + redis: + image: redis:alpine +` + if err := os.WriteFile(composePath, []byte(composeContent), 0644); err != nil { + t.Fatalf("Failed to write test compose file: %v", err) + } + + // Parse the compose file + mConfig := &fly.MachineConfig{} + err := ParseComposeFileWithPath(mConfig, composePath) + if err != nil { + t.Fatalf("Failed to parse compose file with short dependencies: %v", err) + } + + // Find web container + var webContainer *fly.ContainerConfig + for _, container := range mConfig.Containers { + if container.Name == "web" { + webContainer = container + break + } + } + + if webContainer == nil { + t.Fatal("web container not found") + } + + // Check dependencies + if len(webContainer.DependsOn) != 2 { + t.Errorf("Expected web to have 2 dependencies, got %d", len(webContainer.DependsOn)) + } + + depNames := make(map[string]bool) + for _, dep := range webContainer.DependsOn { + depNames[dep.Name] = true + if dep.Condition != fly.Started { + t.Errorf("Expected condition 'started' for short syntax, got '%s'", dep.Condition) + } + } + + if !depNames["db"] { + t.Error("Expected dependency on 'db'") + } + if !depNames["redis"] { + t.Error("Expected dependency on 'redis'") + } +} diff --git a/internal/containerconfig/parse.go b/internal/containerconfig/parse.go new file mode 100644 index 0000000000..9161f273d4 --- /dev/null +++ b/internal/containerconfig/parse.go @@ -0,0 +1,71 @@ +package containerconfig + +import ( + "fmt" + "path/filepath" + + fly "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/config" +) + +// ParseContainerConfig determines the type of container configuration and parses it directly into mConfig +func ParseContainerConfig(mConfig *fly.MachineConfig, composePath, machineConfigStr, configFilePath, containerName string) error { + var selectedContainer *fly.ContainerConfig + + // Check if compose file is specified + if composePath != "" { + // Make path relative to fly.toml directory if not absolute + if !filepath.IsAbs(composePath) { + configDir := filepath.Dir(configFilePath) + composePath = filepath.Join(configDir, composePath) + } + if err := ParseComposeFileWithPath(mConfig, composePath); err != nil { + return err + } + } else if machineConfigStr != "" { + // Fall back to machine config if specified + if err := config.ParseConfig(mConfig, machineConfigStr); err != nil { + return err + } + + // Apply container selection logic only for machine config JSON + if len(mConfig.Containers) > 0 { + // Select which container should receive the built image + // Priority: specified containerName > "app" container > first container + match := containerName + if match == "" { + match = "app" + } + + for _, c := range mConfig.Containers { + if c.Name == match { + selectedContainer = c + break + } + } + + if selectedContainer == nil { + if containerName != "" { + return fmt.Errorf("container %q not found", containerName) + } else { + selectedContainer = mConfig.Containers[0] + } + } + } + } else { + return nil + } + + // Validate all containers have images and apply selectedContainer logic + for _, c := range mConfig.Containers { + if c == selectedContainer { + // For machine config, set the selected container's image to "." + c.Image = "." 
+ } else if c.Image == "" { + // All other containers must have an image specified + return fmt.Errorf("container %q must have an image specified", c.Name) + } + } + + return nil +} diff --git a/internal/containerconfig/testdata/nginx.conf b/internal/containerconfig/testdata/nginx.conf new file mode 100644 index 0000000000..feb65eb247 --- /dev/null +++ b/internal/containerconfig/testdata/nginx.conf @@ -0,0 +1,10 @@ +server { + listen 80; + server_name localhost; + + location / { + proxy_pass http://echo:80; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + } +} diff --git a/internal/flag/context.go b/internal/flag/context.go index a3fb01beea..2d038f709f 100644 --- a/internal/flag/context.go +++ b/internal/flag/context.go @@ -142,6 +142,11 @@ func GetOrg(ctx context.Context) string { return org } +// GetMPGClusterID is shorthand for GetString(ctx, "cluster"). +func GetMPGClusterID(ctx context.Context) string { + return GetString(ctx, flagnames.MPGClusterID) +} + // GetRegion is shorthand for GetString(ctx, Region). func GetRegion(ctx context.Context) string { return GetString(ctx, flagnames.Region) @@ -191,3 +196,19 @@ func GetFlagsName(ctx context.Context, ignoreFlags []string) []string { func GetProcessGroup(ctx context.Context) string { return GetString(ctx, flagnames.ProcessGroup) } + +func GetBuildkitAddr(ctx context.Context) string { + addr := GetString(ctx, "buildkit-addr") + if addr == "" { + addr = env.First("BUILDKIT_ADDR") + } + return addr +} + +func GetBuildkitImage(ctx context.Context) string { + addr := GetString(ctx, "buildkit-image") + if addr == "" { + addr = env.First("BUILDKIT_IMAGE") + } + return addr +} diff --git a/internal/flag/flag.go b/internal/flag/flag.go index 6eb4d30aac..bff959645e 100644 --- a/internal/flag/flag.go +++ b/internal/flag/flag.go @@ -36,7 +36,7 @@ func makeAlias[T any](template T, name string) T { useAliasShortHandField := reflect.ValueOf(template).FieldByName("UseAliasShortHand") if useAliasShortHandField.IsValid() { useAliasShortHand := useAliasShortHandField.Interface().(bool) - if useAliasShortHand == true { + if useAliasShortHand { value.FieldByName("Shorthand").SetString(string(name[0])) } } @@ -320,6 +320,14 @@ func Org() String { } } +func MPGCluster() String { + return String{ + Name: "cluster", + Shorthand: "c", + Description: "The target cluster ID", + } +} + // Region returns a region string flag. func Region() String { return String{ @@ -577,6 +585,47 @@ func Nixpacks() Bool { } } +func BuildkitAddr() String { + return String{ + Name: "buildkit-addr", + Description: "Address of remote buildkit daemon (e.g. tcp://127.0.0.1:1234 or unix:///path/to/socket)", + EnvName: "BUILDKIT_ADDR", + Hidden: true, + } +} + +func BuildkitImage() String { + return String{ + Name: "buildkit-image", + Description: "Image to use for remote buildkit daemon", + EnvName: "BUILDKIT_IMAGE", + Hidden: true, + } +} + +func Buildkit() Bool { + return Bool{ + Name: "buildkit", + Description: "Deploy using buildkit-based remote builder", + } +} + +func Compression() String { + return String{ + Name: "compression", + Description: `Compression algorithm to use for the image. Options are "zstd" or "gzip". Defaults to "gzip".`, + Default: "gzip", + } +} + +func CompressionLevel() Int { + return Int{ + Name: "compression-level", + Description: `Compression level to use for the image. 
Defaults to 7.`, + Default: 7, + } +} + func Strategy() String { return String{ Name: "strategy", diff --git a/internal/flag/flagnames/constants.go b/internal/flag/flagnames/constants.go index 521749ce9a..7e229924a9 100644 --- a/internal/flag/flagnames/constants.go +++ b/internal/flag/flagnames/constants.go @@ -51,4 +51,10 @@ const ( // ProcessGroup denotes the name of the process group flag. ProcessGroup = "process-group" + + // MPGClusterID denotes the name of the MPG cluster ID flag. + MPGClusterID = "cluster" + + // MPGDatabase denotes the name of the MPG database flag. + MPGDatabase = "database" ) diff --git a/internal/flag/validation/compression.go b/internal/flag/validation/compression.go new file mode 100644 index 0000000000..d915051ad8 --- /dev/null +++ b/internal/flag/validation/compression.go @@ -0,0 +1,33 @@ +package validation + +import ( + "fmt" + + "github.com/superfly/flyctl/internal/flyerr" +) + +// ValidateCompressionFlag checks if the --compression flag has a valid value. +// This can be "gzip" (soon to be legacy) or "zstd" (what we'd like to be the default) +func ValidateCompressionFlag(compression string) error { + if compression == "" || compression == "gzip" || compression == "zstd" { + return nil // Valid + } + + return flyerr.GenericErr{ + Err: fmt.Sprintf("Invalid value '%s' for compression. Valid options are 'gzip', 'zstd', or leave unset.", compression), + Suggest: "Please use 'gzip', 'zstd', or omit the flag.", + } +} + +// ValidateCompressionLevelFlag checks if the --compression-level flag has a value between 0 and 9. +// This is what is currently supported by Depot Builder (they map these to proper zstd compression levels) +func ValidateCompressionLevelFlag(level int) error { + if level < 0 || level > 9 { + return flyerr.GenericErr{ + Err: fmt.Sprintf("Invalid value '%d' for compression level. 
Must be an integer between 0 and 9.", level), + Suggest: "Please use an integer between 0 and 9, or omit the flag.", + } + } + + return nil +} diff --git a/internal/flapsutil/flaps_client.go b/internal/flapsutil/flaps_client.go index f33faeb5e8..a4679c1789 100644 --- a/internal/flapsutil/flaps_client.go +++ b/internal/flapsutil/flaps_client.go @@ -15,17 +15,17 @@ type FlapsClient interface { AcquireLease(ctx context.Context, machineID string, ttl *int) (*fly.MachineLease, error) Cordon(ctx context.Context, machineID string, nonce string) (err error) CreateApp(ctx context.Context, name string, org string) (err error) - CreateSecret(ctx context.Context, sLabel, sType string, in fly.CreateSecretRequest) (err error) CreateVolume(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) CreateVolumeSnapshot(ctx context.Context, volumeId string) error DeleteMetadata(ctx context.Context, machineID, key string) error - DeleteSecret(ctx context.Context, label string) (err error) + DeleteAppSecret(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) + DeleteSecretKey(ctx context.Context, name string) error DeleteVolume(ctx context.Context, volumeId string) (*fly.Volume, error) Destroy(ctx context.Context, input fly.RemoveMachineInput, nonce string) (err error) Exec(ctx context.Context, machineID string, in *fly.MachineExecRequest) (*fly.MachineExecResponse, error) ExtendVolume(ctx context.Context, volumeId string, size_gb int) (*fly.Volume, bool, error) FindLease(ctx context.Context, machineID string) (*fly.MachineLease, error) - GenerateSecret(ctx context.Context, sLabel, sType string) (err error) + GenerateSecretKey(ctx context.Context, name string, typ string) (*fly.SetSecretKeyResp, error) Get(ctx context.Context, machineID string) (*fly.Machine, error) GetAllVolumes(ctx context.Context) ([]fly.Volume, error) GetMany(ctx context.Context, machineIDs []string) ([]*fly.Machine, error) @@ -39,17 +39,21 @@ type FlapsClient interface { List(ctx context.Context, state string) ([]*fly.Machine, error) ListActive(ctx context.Context) ([]*fly.Machine, error) ListFlyAppsMachines(ctx context.Context) ([]*fly.Machine, *fly.Machine, error) - ListSecrets(ctx context.Context) (out []fly.ListSecret, err error) + ListAppSecrets(ctx context.Context, version *uint64, showSecrets bool) ([]fly.AppSecret, error) + ListSecretKeys(ctx context.Context, version *uint64) ([]fly.SecretKey, error) NewRequest(ctx context.Context, method, path string, in interface{}, headers map[string][]string) (*http.Request, error) RefreshLease(ctx context.Context, machineID string, ttl *int, nonce string) (*fly.MachineLease, error) ReleaseLease(ctx context.Context, machineID, nonce string) error Restart(ctx context.Context, in fly.RestartMachineInput, nonce string) (err error) + SetAppSecret(ctx context.Context, name string, value string) (*fly.SetAppSecretResp, error) + SetSecretKey(ctx context.Context, name string, typ string, value []byte) (*fly.SetSecretKeyResp, error) SetMetadata(ctx context.Context, machineID, key, value string) error Start(ctx context.Context, machineID string, nonce string) (out *fly.MachineStartResponse, err error) Stop(ctx context.Context, in fly.StopMachineInput, nonce string) (err error) Suspend(ctx context.Context, machineID, nonce string) error Uncordon(ctx context.Context, machineID string, nonce string) (err error) Update(ctx context.Context, builder fly.LaunchMachineInput, nonce string) (out *fly.Machine, err error) + UpdateAppSecrets(ctx context.Context, values 
map[string]*string) (*fly.UpdateAppSecretsResp, error) UpdateVolume(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) Wait(ctx context.Context, machine *fly.Machine, state string, timeout time.Duration) (err error) WaitForApp(ctx context.Context, name string) error diff --git a/internal/flapsutil/helpers.go b/internal/flapsutil/helpers.go new file mode 100644 index 0000000000..6ed320e0f1 --- /dev/null +++ b/internal/flapsutil/helpers.go @@ -0,0 +1,37 @@ +package flapsutil + +import ( + "context" + "fmt" + + "github.com/superfly/fly-go" + "github.com/superfly/fly-go/flaps" + + "github.com/superfly/flyctl/internal/flyerr" +) + +// SetClient builds a flaps client for appName and stores it in a new context which is returned. +// It also returns the flaps client and the AppCompact for appName, which it must lookup. +// If app is passed in, it is used, otherwise it is resolved from appName. +// On error the old context is returned along with the error. +// The context must already have the flyutil client set. +func SetClient(ctx context.Context, app *fly.AppCompact, appName string) (context.Context, FlapsClient, *fly.AppCompact, error) { + app, err := resolveApp(ctx, app, appName) + if err != nil { + return ctx, nil, nil, fmt.Errorf("get app %s: %w", appName, err) + } + + flapsClient, err := NewClientWithOptions(ctx, flaps.NewClientOpts{ + AppCompact: app, + AppName: app.Name, + }) + if err != nil { + err = flyerr.GenericErr{ + Err: fmt.Sprintf("could not create flaps client: %v", err), + } + return ctx, flapsClient, app, err + } + + ctx = NewContextWithClient(ctx, flapsClient) + return ctx, flapsClient, app, nil +} diff --git a/internal/flyutil/client.go b/internal/flyutil/client.go index 9a8a015de4..993b0a0f06 100644 --- a/internal/flyutil/client.go +++ b/internal/flyutil/client.go @@ -51,6 +51,7 @@ type Client interface { GetAppCertificates(ctx context.Context, appName string) ([]fly.AppCertificateCompact, error) GetAppCompact(ctx context.Context, appName string) (*fly.AppCompact, error) GetAppCurrentReleaseMachines(ctx context.Context, appName string) (*fly.Release, error) + GetAppCNAMETarget(ctx context.Context, appName string) (string, error) GetAppHostIssues(ctx context.Context, appName string) ([]fly.HostIssue, error) GetAppLimitedAccessTokens(ctx context.Context, appName string) ([]fly.LimitedAccessToken, error) GetAppLogs(ctx context.Context, appName, token, region, instanceID string) (entries []fly.LogEntry, nextToken string, err error) @@ -58,7 +59,6 @@ type Client interface { GetAppNameStateFromVolume(ctx context.Context, volID string) (*string, *string, error) GetAppNetwork(ctx context.Context, appName string) (*string, error) GetAppReleasesMachines(ctx context.Context, appName, status string, limit int) ([]fly.Release, error) - GetAppSecrets(ctx context.Context, appName string) ([]fly.Secret, error) GetApps(ctx context.Context, role *string) ([]fly.App, error) GetAppsForOrganization(ctx context.Context, orgID string) ([]fly.App, error) GetDeployerAppByOrg(ctx context.Context, orgID string) (*fly.App, error) @@ -70,7 +70,7 @@ type Client interface { GetDomains(ctx context.Context, organizationSlug string) ([]*fly.Domain, error) GetIPAddresses(ctx context.Context, appName string) ([]fly.IPAddress, error) GetEgressIPAddresses(ctx context.Context, appName string) (map[string][]fly.EgressIPAddress, error) - GetLatestImageDetails(ctx context.Context, image string) (*fly.ImageVersion, error) + GetLatestImageDetails(ctx context.Context, image string, 
flyVersion string) (*fly.ImageVersion, error) GetLatestImageTag(ctx context.Context, repository string, snapshotId *string) (string, error) GetLoggedCertificates(ctx context.Context, slug string) ([]fly.LoggedCertificate, error) GetMachine(ctx context.Context, machineId string) (*fly.GqlMachine, error) @@ -99,9 +99,7 @@ type Client interface { Run(req *graphql.Request) (fly.Query, error) RunWithContext(ctx context.Context, req *graphql.Request) (fly.Query, error) SetGenqClient(client genq.Client) - SetSecrets(ctx context.Context, appName string, secrets map[string]string) (*fly.Release, error) UpdateRelease(ctx context.Context, input fly.UpdateReleaseInput) (*fly.UpdateReleaseResponse, error) - UnsetSecrets(ctx context.Context, appName string, keys []string) (*fly.Release, error) ValidateWireGuardPeers(ctx context.Context, peerIPs []string) (invalid []string, err error) } diff --git a/internal/haikunator/haikunator.go b/internal/haikunator/haikunator.go index bc6198a65c..cdfd34eacd 100644 --- a/internal/haikunator/haikunator.go +++ b/internal/haikunator/haikunator.go @@ -4,11 +4,11 @@ import ( "crypto/rand" "math/big" rand2 "math/rand" + "slices" "strconv" "strings" "github.com/superfly/flyctl/helpers" - "golang.org/x/exp/slices" ) var adjectives = strings.Fields(` diff --git a/internal/incidents/hosts.go b/internal/incidents/hosts.go index 1c87b53dd3..d9e3caf97f 100644 --- a/internal/incidents/hosts.go +++ b/internal/incidents/hosts.go @@ -2,7 +2,6 @@ package incidents import ( "context" - "errors" "fmt" "time" @@ -37,30 +36,27 @@ func QueryHostIssues(ctx context.Context) { return } - task.FromContext(ctx).RunFinalizer(func(parent context.Context) { - logger.Debug("started querying for host issues") - - ctx, cancel := context.WithTimeout(context.WithoutCancel(parent), 3*time.Second) + statusCh := make(chan []fly.HostIssue, 1) + logger.Debug("started querying for host issues") + statusCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), time.Second) + go func() { defer cancel() + defer close(statusCh) + response, err := GetAppHostIssuesRequest(statusCtx, appName) + if err != nil { + logger.Debugf("failed querying for host issues: %v", err) + } + statusCh <- response + }() - switch hostIssues, err := GetAppHostIssuesRequest(ctx, appName); { - case err == nil: - if hostIssues == nil { - break - } - - logger.Debugf("querying for host issues resulted to %v", hostIssues) - hostIssuesCount := len(hostIssues) - if hostIssuesCount > 0 { - fmt.Fprintln(io.ErrOut, colorize.WarningIcon(), - colorize.Yellow("WARNING: There are active host issues affecting your app. Please check `fly incidents hosts list` or visit your app in https://fly.io/dashboard\n"), - ) - break - } - case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): - logger.Debugf("failed querying for host issues. Context cancelled or deadline exceeded: %v", err) - default: - logger.Debugf("failed querying for host issues incidents: %v", err) + task.FromContext(ctx).RunFinalizer(func(parent context.Context) { + cancel() + hostIssues := <-statusCh + logger.Debugf("querying for host issues resulted to %v", hostIssues) + if len(hostIssues) > 0 { + fmt.Fprintln(io.ErrOut, colorize.WarningIcon(), + colorize.Yellow("WARNING: There are active host issues affecting your app. 
Please check `fly incidents hosts list` or visit your app in https://fly.io/dashboard\n"), + ) } }) } diff --git a/internal/incidents/statuspage.go b/internal/incidents/statuspage.go index e85b1bf808..8ed52bef63 100644 --- a/internal/incidents/statuspage.go +++ b/internal/incidents/statuspage.go @@ -3,7 +3,6 @@ package incidents import ( "context" "encoding/json" - "errors" "fmt" "net/http" "os" @@ -48,32 +47,32 @@ func QueryStatuspageIncidents(ctx context.Context) { logger := logger.FromContext(ctx) io := iostreams.FromContext(ctx) colorize := io.ColorScheme() + logger.Debug("started querying for statuspage incidents") - task.FromContext(ctx).RunFinalizer(func(parent context.Context) { - logger.Debug("started querying for statuspage incidents") - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) + statusCh := make(chan *StatusPageApiResponse, 1) + statusCtx, cancel := context.WithTimeout(context.Background(), time.Second) + go func() { defer cancel() - - switch incidents, err := StatuspageIncidentsRequest(ctx); { - case err == nil: - if incidents == nil { - break - } - - logger.Debugf("querying for statuspage incidents resulted to %v", incidents) - incidentCount := len(incidents.Incidents) - if incidentCount > 0 { - fmt.Fprintln(io.ErrOut, colorize.WarningIcon(), - colorize.Yellow("WARNING: There are active incidents. Please check `fly incidents list` or visit https://status.flyio.net\n"), - ) - break - } - case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): - logger.Debugf("failed querying for Statuspage incidents. Context cancelled or deadline exceeded: %v", err) - default: + defer close(statusCh) + response, err := StatuspageIncidentsRequest(statusCtx) + if err != nil { logger.Debugf("failed querying for Statuspage incidents: %v", err) } + statusCh <- response + }() + + task.FromContext(ctx).RunFinalizer(func(parent context.Context) { + cancel() + incidents := <-statusCh + if incidents == nil { + return + } + logger.Debugf("querying for statuspage incidents resulted to %v", incidents) + if len(incidents.Incidents) > 0 { + fmt.Fprintln(io.ErrOut, colorize.WarningIcon(), + colorize.Yellow("WARNING: There are active incidents. 
Please check `fly incidents list` or visit https://status.flyio.net\n"), + ) + } }) } diff --git a/internal/inmem/client.go b/internal/inmem/client.go index 1231cf5020..19c4992d3c 100644 --- a/internal/inmem/client.go +++ b/internal/inmem/client.go @@ -216,6 +216,10 @@ func (m *Client) GetAppCurrentReleaseMachines(ctx context.Context, appName strin panic("TODO") } +func (m *Client) GetAppCNAMETarget(ctx context.Context, appName string) (string, error) { + panic("TODO") +} + func (m *Client) GetAppHostIssues(ctx context.Context, appName string) ([]fly.HostIssue, error) { panic("TODO") } @@ -292,11 +296,11 @@ func (m *Client) GetIPAddresses(ctx context.Context, appName string) ([]fly.IPAd return nil, nil // TODO } -func (c *Client) GetEgressIPAddresses(ctx context.Context, appName string) (map[string][]fly.EgressIPAddress, error) { +func (m *Client) GetEgressIPAddresses(ctx context.Context, appName string) (map[string][]fly.EgressIPAddress, error) { panic("TODO") } -func (m *Client) GetLatestImageDetails(ctx context.Context, image string) (*fly.ImageVersion, error) { +func (m *Client) GetLatestImageDetails(ctx context.Context, image string, flyVersion string) (*fly.ImageVersion, error) { panic("TODO") } @@ -317,7 +321,11 @@ func (m *Client) GetNearestRegion(ctx context.Context) (*fly.Region, error) { } func (m *Client) GetOrganizationByApp(ctx context.Context, appName string) (*fly.Organization, error) { - panic("TODO") + app, err := m.GetAppCompact(ctx, appName) + if err != nil { + return nil, err + } + return &fly.Organization{ID: app.Organization.ID}, nil } func (m *Client) GetOrganizationBySlug(ctx context.Context, slug string) (*fly.Organization, error) { diff --git a/internal/inmem/flaps_client.go b/internal/inmem/flaps_client.go index 8eb40a42dc..4bfa9b958d 100644 --- a/internal/inmem/flaps_client.go +++ b/internal/inmem/flaps_client.go @@ -36,10 +36,6 @@ func (m *FlapsClient) CreateApp(ctx context.Context, name string, org string) (e panic("TODO") } -func (m *FlapsClient) CreateSecret(ctx context.Context, sLabel, sType string, in fly.CreateSecretRequest) (err error) { - panic("TODO") -} - func (m *FlapsClient) CreateVolume(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) { panic("TODO") } @@ -52,7 +48,11 @@ func (m *FlapsClient) DeleteMetadata(ctx context.Context, machineID, key string) panic("TODO") } -func (m *FlapsClient) DeleteSecret(ctx context.Context, label string) (err error) { +func (m *FlapsClient) DeleteAppSecret(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) { + panic("TODO") +} + +func (m *FlapsClient) DeleteSecretKey(ctx context.Context, name string) error { panic("TODO") } @@ -76,7 +76,7 @@ func (m *FlapsClient) FindLease(ctx context.Context, machineID string) (*fly.Mac panic("TODO") } -func (m *FlapsClient) GenerateSecret(ctx context.Context, sLabel, sType string) (err error) { +func (m *FlapsClient) GenerateSecretKey(ctx context.Context, name string, typ string) (*fly.SetSecretKeyResp, error) { panic("TODO") } @@ -152,7 +152,11 @@ func (m *FlapsClient) ListFlyAppsMachines(ctx context.Context) (machines []*fly. 
return machines, releaseCmdMachine, nil } -func (m *FlapsClient) ListSecrets(ctx context.Context) (out []fly.ListSecret, err error) { +func (m *FlapsClient) ListAppSecrets(ctx context.Context, version *uint64, showSecrets bool) ([]fly.AppSecret, error) { + panic("TODO") +} + +func (m *FlapsClient) ListSecretKeys(ctx context.Context, version *uint64) ([]fly.SecretKey, error) { panic("TODO") } @@ -176,6 +180,14 @@ func (m *FlapsClient) SetMetadata(ctx context.Context, machineID, key, value str panic("TODO") } +func (m *FlapsClient) SetAppSecret(ctx context.Context, name string, value string) (*fly.SetAppSecretResp, error) { + panic("TODO") +} + +func (m *FlapsClient) SetSecretKey(ctx context.Context, name string, typ string, value []byte) (*fly.SetSecretKeyResp, error) { + panic("TODO") +} + func (m *FlapsClient) Start(ctx context.Context, machineID string, nonce string) (out *fly.MachineStartResponse, err error) { panic("TODO") } @@ -196,6 +208,10 @@ func (m *FlapsClient) Update(ctx context.Context, builder fly.LaunchMachineInput panic("TODO") } +func (m *FlapsClient) UpdateAppSecrets(ctx context.Context, values map[string]*string) (*fly.UpdateAppSecretsResp, error) { + panic("TODO") +} + func (m *FlapsClient) UpdateVolume(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) { panic("TODO") } diff --git a/internal/launchdarkly/launchdarkly.go b/internal/launchdarkly/launchdarkly.go index 1f9f601bf2..aa9f3f5e4b 100644 --- a/internal/launchdarkly/launchdarkly.go +++ b/internal/launchdarkly/launchdarkly.go @@ -76,6 +76,22 @@ func NewClient(ctx context.Context, userInfo UserInfo) (*Client, error) { return ldClient, nil } +func NewServiceClient() (*Client, error) { + ctx := context.Background() + _, span := tracing.GetTracer().Start(ctx, "new_flyctl_feature_flag_client") + defer span.End() + + ldClient := &Client{ldContext: ldcontext.NewWithKind(ldcontext.Kind("service"), "flyctl"), flagsMutex: sync.Mutex{}} + + timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + // we don't really care if this errors or not, but it's good to at least try + _ = ldClient.updateFeatureFlags(timeoutCtx) + + go ldClient.monitor(ctx) + return ldClient, nil +} + func (ldClient *Client) monitor(ctx context.Context) { logger := logger.MaybeFromContext(ctx) @@ -177,3 +193,33 @@ func (ldClient *Client) updateFeatureFlags(ctx context.Context) error { return nil } + +func (ldClient *Client) ManagedPostgresEnabled() bool { + choice := ldClient.getLaunchPostgresChoiceFlag() + return choice == "mpg" || choice == "both" +} + +func (ldClient *Client) UnmanagedPostgresEnabled() bool { + choice := ldClient.getLaunchPostgresChoiceFlag() + return choice == "unmanaged-pg" || choice == "both" +} + +func (ldClient *Client) getLaunchPostgresChoiceFlag() string { + return ldClient.GetFeatureFlagValue("launch-postgres-choice", "unmanaged-pg").(string) +} + +func (ldClient *Client) getManagedBuilderEnabled() bool { + return ldClient.GetFeatureFlagValue("managed-builder", false).(bool) +} + +func (ldClient *Client) ManagedBuilderEnabled() bool { + return ldClient.getManagedBuilderEnabled() +} + +func (ldClient *Client) UseZstdEnabled() bool { + return ldClient.GetFeatureFlagValue("use-zstd-for-docker-images", false).(bool) +} + +func (ldClient *Client) GetCompressionStrength() any { + return ldClient.GetFeatureFlagValue("flyctl-compression-strength", 7) +} diff --git a/internal/machine/leasable_machine.go b/internal/machine/leasable_machine.go index 5653516906..23ecd7d7cc 
100644 --- a/internal/machine/leasable_machine.go +++ b/internal/machine/leasable_machine.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net/http" + "strings" "sync" "time" @@ -19,7 +20,6 @@ import ( "github.com/superfly/flyctl/terminal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "golang.org/x/exp/maps" ) type LeasableMachine interface { @@ -60,8 +60,10 @@ type leasableMachine struct { leaseNonce string } -// TODO: make sure the other functions handle showLogs correctly +// NewLeasableMachine creates a wrapper for the given machine. +// A lease must be held before calling this function. func NewLeasableMachine(flapsClient flapsutil.FlapsClient, io *iostreams.IOStreams, machine *fly.Machine, showLogs bool) LeasableMachine { + // TODO: make sure the other functions handle showLogs correctly return &leasableMachine{ flapsClient: flapsClient, io: io, @@ -321,16 +323,6 @@ func (lm *leasableMachine) WaitForHealthchecksToPass(ctx context.Context, timeou waitCtx, cancel := ctrlc.HookCancelableContext(context.WithTimeout(ctx, timeout)) defer cancel() - checkDefs := maps.Values(lm.Machine().Config.Checks) - for _, s := range lm.Machine().Config.Services { - checkDefs = append(checkDefs, s.Checks...) - } - shortestInterval := 120 * time.Second - for _, c := range checkDefs { - if c.Interval != nil && c.Interval.Duration < shortestInterval { - shortestInterval = c.Interval.Duration - } - } b := &backoff.Backoff{ Min: 1 * time.Second, Max: 2 * time.Second, @@ -496,26 +488,29 @@ func (lm *leasableMachine) StartBackgroundLeaseRefresh(ctx context.Context, leas } func (lm *leasableMachine) refreshLeaseUntilCanceled(ctx context.Context, duration time.Duration, delayBetween time.Duration) { - var ( - err error - b = &backoff.Backoff{ - Min: delayBetween - 20*time.Millisecond, - Max: delayBetween + 20*time.Millisecond, - Jitter: true, - } - ) + b := &backoff.Backoff{ + Min: delayBetween - 20*time.Millisecond, + Max: delayBetween + 20*time.Millisecond, + Jitter: true, + } + for { - err = lm.RefreshLease(ctx, duration) - switch { + time.Sleep(b.Duration()) + switch err := lm.RefreshLease(ctx, duration); { + case err == nil: + // good times case errors.Is(err, context.Canceled): return - case err != nil: + case strings.Contains(err.Error(), "machine not found"): + // machine is gone, no need to refresh its lease + return + default: terminal.Warnf("error refreshing lease for machine %s: %v\n", lm.machine.ID, err) } - time.Sleep(b.Duration()) } } +// ReleaseLease releases the lease on this machine. 
 func (lm *leasableMachine) ReleaseLease(ctx context.Context) error {
 	lm.mu.Lock()
 	defer lm.mu.Unlock()
@@ -536,13 +531,7 @@ func (lm *leasableMachine) ReleaseLease(ctx context.Context) error {
 		defer cancel()
 	}
 
-	err := lm.flapsClient.ReleaseLease(ctx, lm.machine.ID, nonce)
-	contextTimedOutOrCanceled := errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)
-	if err != nil && (!contextWasAlreadyCanceled || !contextTimedOutOrCanceled) {
-		terminal.Warnf("failed to release lease for machine %s: %v\n", lm.machine.ID, err)
-		return err
-	}
-	return nil
+	return lm.flapsClient.ReleaseLease(ctx, lm.machine.ID, nonce)
 }
 
 func (lm *leasableMachine) resetLease() {
@@ -554,13 +543,9 @@ func (lm *leasableMachine) resetLease() {
 
 func (lm *leasableMachine) GetMinIntervalAndMinGracePeriod() (time.Duration, time.Duration) {
 	minInterval := 60 * time.Second
-
-	checkDefs := maps.Values(lm.Machine().Config.Checks)
-	for _, s := range lm.Machine().Config.Services {
-		checkDefs = append(checkDefs, s.Checks...)
-	}
 	minGracePeriod := time.Second
-	for _, c := range checkDefs {
+
+	for _, c := range lm.Machine().Config.Checks {
 		if c.Interval != nil && c.Interval.Duration < minInterval {
 			minInterval = c.Interval.Duration
 		}
@@ -569,6 +554,17 @@ func (lm *leasableMachine) GetMinIntervalAndMinGracePeriod() (time.Duration, tim
 			minGracePeriod = c.GracePeriod.Duration
 		}
 	}
+	for _, s := range lm.Machine().Config.Services {
+		for _, c := range s.Checks {
+			if c.Interval != nil && c.Interval.Duration < minInterval {
+				minInterval = c.Interval.Duration
+			}
+
+			if c.GracePeriod != nil && c.GracePeriod.Duration < minGracePeriod {
+				minGracePeriod = c.GracePeriod.Duration
+			}
+		}
+	}
 
 	return minInterval, minGracePeriod
 }
diff --git a/internal/machine/machine_set.go b/internal/machine/machine_set.go
index ae905e3cfb..01c4d38822 100644
--- a/internal/machine/machine_set.go
+++ b/internal/machine/machine_set.go
@@ -15,6 +15,7 @@ import (
 	"github.com/superfly/flyctl/internal/tracing"
 	"github.com/superfly/flyctl/iostreams"
 	"github.com/superfly/flyctl/terminal"
+	"golang.org/x/sync/errgroup"
 )
 
 type MachineSet interface {
@@ -49,36 +50,27 @@ func (ms *machineSet) GetMachines() []LeasableMachine {
 	return ms.machines
 }
 
+// AcquireLeases acquires leases on all machines in the set for the given duration.
 func (ms *machineSet) AcquireLeases(ctx context.Context, duration time.Duration) error {
 	if len(ms.machines) == 0 {
 		return nil
 	}
-	results := make(chan error, len(ms.machines))
-	var wg sync.WaitGroup
+	// Don't override ctx. Even if leaseCtx is cancelled, we still want to release the leases.
+	eg, leaseCtx := errgroup.WithContext(ctx)
 	for _, m := range ms.machines {
-		wg.Add(1)
-		go func(m LeasableMachine) {
-			defer wg.Done()
-			results <- m.AcquireLease(ctx, duration)
-		}(m)
+		eg.Go(func() error {
+			return m.AcquireLease(leaseCtx, duration)
+		})
 	}
-	go func() {
-		wg.Wait()
-		close(results)
-	}()
-	hadError := false
-	for err := range results {
-		if err != nil {
-			hadError = true
-			terminal.Warnf("failed to acquire lease: %v\n", err)
-		}
-	}
-	if hadError {
+
+	waitErr := eg.Wait()
+	if waitErr != nil {
+		terminal.Warnf("failed to acquire lease: %v\n", waitErr)
 		if err := ms.ReleaseLeases(ctx); err != nil {
 			terminal.Warnf("error releasing machine leases: %v\n", err)
 		}
-		return fmt.Errorf("error acquiring leases on all machines")
+		return waitErr
 	}
 	return nil
 }
@@ -100,6 +92,7 @@ func (ms *machineSet) RemoveMachines(ctx context.Context, machines []LeasableMac
 	return subset.ReleaseLeases(ctx)
 }
 
+// ReleaseLeases releases leases on all machines in this set.
 func (ms *machineSet) ReleaseLeases(ctx context.Context) error {
 	if len(ms.machines) == 0 {
 		return nil
@@ -130,10 +123,15 @@ func (ms *machineSet) ReleaseLeases(ctx context.Context) error {
 	}()
 	hadError := false
 	for err := range results {
-		contextTimedOutOrCanceled := errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)
-		if err != nil && (!contextWasAlreadyCanceled || !contextTimedOutOrCanceled) {
-			hadError = true
-			terminal.Warnf("failed to release lease: %v\n", err)
+		if err != nil {
+			contextTimedOutOrCanceled := errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)
+			var ferr *flaps.FlapsError
+			if errors.As(err, &ferr) && ferr.ResponseStatusCode == http.StatusNotFound {
+				// StatusNotFound is expected when lease acquisition on this set partially failed; don't treat it as an error.
+ } else if !contextWasAlreadyCanceled || !contextTimedOutOrCanceled { + hadError = true + terminal.Warnf("failed to release lease: %v\n", err) + } } } if hadError { diff --git a/internal/machine/update.go b/internal/machine/update.go index bdbc79133d..99cba6d50e 100644 --- a/internal/machine/update.go +++ b/internal/machine/update.go @@ -3,6 +3,7 @@ package machine import ( "context" "fmt" + "maps" "slices" "time" @@ -10,12 +11,11 @@ import ( "github.com/superfly/flyctl/internal/flapsutil" "github.com/superfly/flyctl/internal/watch" "github.com/superfly/flyctl/iostreams" - "golang.org/x/exp/maps" ) var cpusPerKind = map[string][]int{ "shared": {1, 2, 4, 6, 8}, - "performance": {1, 2, 4, 6, 8, 10, 12, 14, 16}, + "performance": {1, 2, 4, 6, 8, 10, 12, 14, 16, 32, 64, 128}, } func Update(ctx context.Context, m *fly.Machine, input *fly.LaunchMachineInput) error { @@ -151,7 +151,7 @@ func (e InvalidConfigErr) Suggestion() string { validNumCpus := cpusPerKind[e.guest.CPUKind] return fmt.Sprintf("Valid numbers are %v", validNumCpus) case invalidMemorySize: - var incrementSize int = 1024 + var incrementSize = 1024 switch e.guest.CPUKind { case "shared": incrementSize = 256 diff --git a/internal/metrics/synthetics/agent.go b/internal/metrics/synthetics/agent.go index 1f9bea3d98..8951fa0988 100644 --- a/internal/metrics/synthetics/agent.go +++ b/internal/metrics/synthetics/agent.go @@ -78,7 +78,7 @@ type ProbeMessage struct { func processProbe(ctx context.Context, probeMessageJSON []byte, ws *SyntheticsWs) error { logger := logger.FromContext(ctx) - logger.Debug("proccessing probes") + logger.Debug("processing probes") probeMessage := ProbeMessage{} err := json.Unmarshal(probeMessageJSON, &probeMessage) diff --git a/internal/mock/client.go b/internal/mock/client.go index 6c458ac763..187468f4cc 100644 --- a/internal/mock/client.go +++ b/internal/mock/client.go @@ -53,6 +53,7 @@ type Client struct { GetAppCompactFunc func(ctx context.Context, appName string) (*fly.AppCompact, error) GetDeployerAppByOrgFunc func(ctx context.Context, orgID string) (*fly.App, error) GetAppCurrentReleaseMachinesFunc func(ctx context.Context, appName string) (*fly.Release, error) + GetAppCNAMETargetFunc func(ctx context.Context, appName string) (string, error) GetAppHostIssuesFunc func(ctx context.Context, appName string) ([]fly.HostIssue, error) GetAppLimitedAccessTokensFunc func(ctx context.Context, appName string) ([]fly.LimitedAccessToken, error) GetAppLogsFunc func(ctx context.Context, appName, token, region, instanceID string) (entries []fly.LogEntry, nextToken string, err error) @@ -71,7 +72,7 @@ type Client struct { GetDomainsFunc func(ctx context.Context, organizationSlug string) ([]*fly.Domain, error) GetIPAddressesFunc func(ctx context.Context, appName string) ([]fly.IPAddress, error) GetEgressIPAddressesFunc func(ctx context.Context, appName string) (map[string][]fly.EgressIPAddress, error) - GetLatestImageDetailsFunc func(ctx context.Context, image string) (*fly.ImageVersion, error) + GetLatestImageDetailsFunc func(ctx context.Context, image string, flyVersion string) (*fly.ImageVersion, error) GetLatestImageTagFunc func(ctx context.Context, repository string, snapshotId *string) (string, error) GetLoggedCertificatesFunc func(ctx context.Context, slug string) ([]fly.LoggedCertificate, error) GetMachineFunc func(ctx context.Context, machineId string) (*fly.GqlMachine, error) @@ -263,6 +264,10 @@ func (m *Client) GetAppCurrentReleaseMachines(ctx context.Context, appName strin return 
m.GetAppCurrentReleaseMachinesFunc(ctx, appName) } +func (m *Client) GetAppCNAMETarget(ctx context.Context, appName string) (string, error) { + return m.GetAppCNAMETargetFunc(ctx, appName) +} + func (m *Client) GetAppHostIssues(ctx context.Context, appName string) ([]fly.HostIssue, error) { return m.GetAppHostIssuesFunc(ctx, appName) } @@ -335,8 +340,8 @@ func (m *Client) GetEgressIPAddresses(ctx context.Context, appName string) (map[ return m.GetEgressIPAddressesFunc(ctx, appName) } -func (m *Client) GetLatestImageDetails(ctx context.Context, image string) (*fly.ImageVersion, error) { - return m.GetLatestImageDetailsFunc(ctx, image) +func (m *Client) GetLatestImageDetails(ctx context.Context, image string, flyVersion string) (*fly.ImageVersion, error) { + return m.GetLatestImageDetailsFunc(ctx, image, flyVersion) } func (m *Client) GetLatestImageTag(ctx context.Context, repository string, snapshotId *string) (string, error) { diff --git a/internal/mock/flaps_client.go b/internal/mock/flaps_client.go index e6dc8ad23a..30c5150d69 100644 --- a/internal/mock/flaps_client.go +++ b/internal/mock/flaps_client.go @@ -15,17 +15,17 @@ type FlapsClient struct { AcquireLeaseFunc func(ctx context.Context, machineID string, ttl *int) (*fly.MachineLease, error) CordonFunc func(ctx context.Context, machineID string, nonce string) (err error) CreateAppFunc func(ctx context.Context, name string, org string) (err error) - CreateSecretFunc func(ctx context.Context, sLabel, sType string, in fly.CreateSecretRequest) (err error) CreateVolumeFunc func(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) CreateVolumeSnapshotFunc func(ctx context.Context, volumeId string) error DeleteMetadataFunc func(ctx context.Context, machineID, key string) error - DeleteSecretFunc func(ctx context.Context, label string) (err error) + DeleteAppSecretFunc func(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) + DeleteSecretKeyFunc func(ctx context.Context, name string) error DeleteVolumeFunc func(ctx context.Context, volumeId string) (*fly.Volume, error) DestroyFunc func(ctx context.Context, input fly.RemoveMachineInput, nonce string) (err error) ExecFunc func(ctx context.Context, machineID string, in *fly.MachineExecRequest) (*fly.MachineExecResponse, error) ExtendVolumeFunc func(ctx context.Context, volumeId string, size_gb int) (*fly.Volume, bool, error) FindLeaseFunc func(ctx context.Context, machineID string) (*fly.MachineLease, error) - GenerateSecretFunc func(ctx context.Context, sLabel, sType string) (err error) + GenerateSecretKeyFunc func(ctx context.Context, name string, typ string) (*fly.SetSecretKeyResp, error) GetFunc func(ctx context.Context, machineID string) (*fly.Machine, error) GetAllVolumesFunc func(ctx context.Context) ([]fly.Volume, error) GetManyFunc func(ctx context.Context, machineIDs []string) ([]*fly.Machine, error) @@ -39,17 +39,21 @@ type FlapsClient struct { ListFunc func(ctx context.Context, state string) ([]*fly.Machine, error) ListActiveFunc func(ctx context.Context) ([]*fly.Machine, error) ListFlyAppsMachinesFunc func(ctx context.Context) ([]*fly.Machine, *fly.Machine, error) - ListSecretsFunc func(ctx context.Context) (out []fly.ListSecret, err error) + ListAppSecretsFunc func(ctx context.Context, version *uint64, showSecrets bool) ([]fly.AppSecret, error) + ListSecretKeysFunc func(ctx context.Context, version *uint64) ([]fly.SecretKey, error) NewRequestFunc func(ctx context.Context, method, path string, in interface{}, headers map[string][]string) 
(*http.Request, error) RefreshLeaseFunc func(ctx context.Context, machineID string, ttl *int, nonce string) (*fly.MachineLease, error) ReleaseLeaseFunc func(ctx context.Context, machineID, nonce string) error RestartFunc func(ctx context.Context, in fly.RestartMachineInput, nonce string) (err error) SetMetadataFunc func(ctx context.Context, machineID, key, value string) error + SetAppSecretFunc func(ctx context.Context, name string, value string) (*fly.SetAppSecretResp, error) + SetSecretKeyFunc func(ctx context.Context, name string, typ string, value []byte) (*fly.SetSecretKeyResp, error) StartFunc func(ctx context.Context, machineID string, nonce string) (out *fly.MachineStartResponse, err error) StopFunc func(ctx context.Context, in fly.StopMachineInput, nonce string) (err error) SuspendFunc func(ctx context.Context, machineID, nonce string) (err error) UncordonFunc func(ctx context.Context, machineID string, nonce string) (err error) UpdateFunc func(ctx context.Context, builder fly.LaunchMachineInput, nonce string) (out *fly.Machine, err error) + UpdateAppSecretsFunc func(ctx context.Context, values map[string]*string) (*fly.UpdateAppSecretsResp, error) UpdateVolumeFunc func(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) WaitFunc func(ctx context.Context, machine *fly.Machine, state string, timeout time.Duration) (err error) WaitForAppFunc func(ctx context.Context, name string) error @@ -67,10 +71,6 @@ func (m *FlapsClient) CreateApp(ctx context.Context, name string, org string) (e return m.CreateAppFunc(ctx, name, org) } -func (m *FlapsClient) CreateSecret(ctx context.Context, sLabel, sType string, in fly.CreateSecretRequest) (err error) { - return m.CreateSecretFunc(ctx, sLabel, sType, in) -} - func (m *FlapsClient) CreateVolume(ctx context.Context, req fly.CreateVolumeRequest) (*fly.Volume, error) { return m.CreateVolumeFunc(ctx, req) } @@ -83,8 +83,12 @@ func (m *FlapsClient) DeleteMetadata(ctx context.Context, machineID, key string) return m.DeleteMetadataFunc(ctx, machineID, key) } -func (m *FlapsClient) DeleteSecret(ctx context.Context, label string) (err error) { - return m.DeleteSecretFunc(ctx, label) +func (m *FlapsClient) DeleteAppSecret(ctx context.Context, name string) (*fly.DeleteAppSecretResp, error) { + return m.DeleteAppSecretFunc(ctx, name) +} + +func (m *FlapsClient) DeleteSecretKey(ctx context.Context, name string) (err error) { + return m.DeleteSecretKeyFunc(ctx, name) } func (m *FlapsClient) DeleteVolume(ctx context.Context, volumeId string) (*fly.Volume, error) { @@ -107,8 +111,8 @@ func (m *FlapsClient) FindLease(ctx context.Context, machineID string) (*fly.Mac return m.FindLeaseFunc(ctx, machineID) } -func (m *FlapsClient) GenerateSecret(ctx context.Context, sLabel, sType string) (err error) { - return m.GenerateSecretFunc(ctx, sLabel, sType) +func (m *FlapsClient) GenerateSecretKey(ctx context.Context, name string, typ string) (*fly.SetSecretKeyResp, error) { + return m.GenerateSecretKeyFunc(ctx, name, typ) } func (m *FlapsClient) Get(ctx context.Context, machineID string) (*fly.Machine, error) { @@ -163,8 +167,12 @@ func (m *FlapsClient) ListFlyAppsMachines(ctx context.Context) ([]*fly.Machine, return m.ListFlyAppsMachinesFunc(ctx) } -func (m *FlapsClient) ListSecrets(ctx context.Context) (out []fly.ListSecret, err error) { - return m.ListSecretsFunc(ctx) +func (m *FlapsClient) ListAppSecrets(ctx context.Context, version *uint64, showSecrets bool) (out []fly.AppSecret, err error) { + return m.ListAppSecretsFunc(ctx, 
version, showSecrets) +} + +func (m *FlapsClient) ListSecretKeys(ctx context.Context, version *uint64) (out []fly.SecretKey, err error) { + return m.ListSecretKeysFunc(ctx, version) } func (m *FlapsClient) NewRequest(ctx context.Context, method, path string, in interface{}, headers map[string][]string) (*http.Request, error) { @@ -187,6 +195,14 @@ func (m *FlapsClient) SetMetadata(ctx context.Context, machineID, key, value str return m.SetMetadataFunc(ctx, machineID, key, value) } +func (m *FlapsClient) SetAppSecret(ctx context.Context, name string, value string) (*fly.SetAppSecretResp, error) { + return m.SetAppSecretFunc(ctx, name, value) +} + +func (m *FlapsClient) SetSecretKey(ctx context.Context, name string, typ string, value []byte) (*fly.SetSecretKeyResp, error) { + return m.SetSecretKeyFunc(ctx, name, typ, value) +} + func (m *FlapsClient) Start(ctx context.Context, machineID string, nonce string) (out *fly.MachineStartResponse, err error) { return m.StartFunc(ctx, machineID, nonce) } @@ -207,6 +223,10 @@ func (m *FlapsClient) Update(ctx context.Context, builder fly.LaunchMachineInput return m.UpdateFunc(ctx, builder, nonce) } +func (m *FlapsClient) UpdateAppSecrets(ctx context.Context, values map[string]*string) (out *fly.UpdateAppSecretsResp, err error) { + return m.UpdateAppSecretsFunc(ctx, values) +} + func (m *FlapsClient) UpdateVolume(ctx context.Context, volumeId string, req fly.UpdateVolumeRequest) (*fly.Volume, error) { return m.UpdateVolumeFunc(ctx, volumeId, req) } diff --git a/internal/render/render.go b/internal/render/render.go index 8cfe644331..11703ba8dc 100644 --- a/internal/render/render.go +++ b/internal/render/render.go @@ -24,9 +24,10 @@ func TitledJSON(w io.Writer, title string, v interface{}) error { }) } -// Table renders the table defined by the given properties into w. Both title & -// cols are optional. -func Table(w io.Writer, title string, rows [][]string, cols ...string) error { +// NewTable creates and configures a new tablewriter.Table with our default +// settings. The caller can make other configuration changes before calling +// table.Render() on the returned Table. +func NewTable(w io.Writer, title string, rows [][]string, cols ...string) *tablewriter.Table { if title != "" { fmt.Fprintln(w, aurora.Bold(title)) } @@ -49,6 +50,14 @@ func Table(w io.Writer, title string, rows [][]string, cols ...string) error { table.AppendBulk(rows) + return table +} + +// Table renders the table defined by the given properties into w. Both title & +// cols are optional. +func Table(w io.Writer, title string, rows [][]string, cols ...string) error { + table := NewTable(w, title, rows, cols...) 
+ table.Render() fmt.Fprintln(w) diff --git a/internal/set/set_test.go b/internal/set/set_test.go index e0d047c800..b9414ac095 100644 --- a/internal/set/set_test.go +++ b/internal/set/set_test.go @@ -1,10 +1,10 @@ package set import ( + "slices" "testing" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" ) func TestSet(t *testing.T) { diff --git a/internal/tracing/tracing.go b/internal/tracing/tracing.go index 64192d36ae..3cd37b8933 100644 --- a/internal/tracing/tracing.go +++ b/internal/tracing/tracing.go @@ -39,11 +39,6 @@ func getCollectorUrl() string { if url != "" { return url } - - if buildinfo.IsDev() { - return "fly-otel-collector-dev.fly.dev" - } - return "fly-otel-collector-prod.fly.dev" } diff --git a/internal/uiex/builders.go b/internal/uiex/builders.go new file mode 100644 index 0000000000..ed84dd1011 --- /dev/null +++ b/internal/uiex/builders.go @@ -0,0 +1,75 @@ +package uiex + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/superfly/flyctl/internal/config" +) + +type CreateFlyManagedBuilderParams struct { + Region string `json:"region"` +} +type CreateFlyManagedBuilderInput struct { + Builder CreateFlyManagedBuilderParams `json:"builder"` +} + +type FlyManagedBuilder struct { + AppName string `json:"app_name"` + MachineID string `json:"machine_id"` +} + +type CreateFlyManagedBuilderResponse struct { + Data FlyManagedBuilder `json:"data"` + Errors DetailedErrors `json:"errors"` +} + +func (c *Client) CreateFlyManagedBuilder(ctx context.Context, orgSlug string, region string) (CreateFlyManagedBuilderResponse, error) { + var response CreateFlyManagedBuilderResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/builders", c.baseUrl, orgSlug) + + input := &CreateFlyManagedBuilderInput{ + Builder: CreateFlyManagedBuilderParams{ + Region: region, + }, + } + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(input); err != nil { + return response, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, &buf) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + body, err := io.ReadAll(res.Body) + if err != nil { + return response, fmt.Errorf("failed to read response body: %w", err) + } + + switch res.StatusCode { + case http.StatusCreated: + if err = json.Unmarshal(body, &response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + + return response, nil + + default: + return response, fmt.Errorf("builder creation failed, please try again (status %d): %s", res.StatusCode, string(body)) + } +} diff --git a/internal/uiex/client.go b/internal/uiex/client.go new file mode 100644 index 0000000000..4fdf82c749 --- /dev/null +++ b/internal/uiex/client.go @@ -0,0 +1,67 @@ +package uiex + +import ( + "context" + "fmt" + "net/http" + "net/url" + "os" + + "github.com/superfly/fly-go" + "github.com/superfly/fly-go/tokens" + "github.com/superfly/flyctl/internal/httptracing" + "github.com/superfly/flyctl/internal/logger" +) + +type Client struct { + baseUrl *url.URL + tokens *tokens.Tokens + httpClient *http.Client + userAgent string +} + +type NewClientOpts struct { + // optional, sent with requests + UserAgent string + + // URL used when connecting via 
usermode wireguard. + BaseURL *url.URL + + Tokens *tokens.Tokens + + // optional: + Logger fly.Logger + + // optional, used to construct the underlying HTTP client + Transport http.RoundTripper +} + +func NewWithOptions(ctx context.Context, opts NewClientOpts) (*Client, error) { + var err error + uiexBaseURL := os.Getenv("FLY_UIEX_BASE_URL") + + if uiexBaseURL == "" { + uiexBaseURL = "https://api.fly.io" + } + uiexUrl, err := url.Parse(uiexBaseURL) + if err != nil { + return nil, fmt.Errorf("invalid FLY_UIEX_BASE_URL '%s' with error: %w", uiexBaseURL, err) + } + + httpClient, err := fly.NewHTTPClient(logger.MaybeFromContext(ctx), httptracing.NewTransport(http.DefaultTransport)) + if err != nil { + return nil, fmt.Errorf("uiex: can't setup HTTP client to %s: %w", uiexUrl.String(), err) + } + + userAgent := "flyctl" + if opts.UserAgent != "" { + userAgent = opts.UserAgent + } + + return &Client{ + baseUrl: uiexUrl, + tokens: opts.Tokens, + httpClient: httpClient, + userAgent: userAgent, + }, nil +} diff --git a/internal/uiex/managed_postgres.go b/internal/uiex/managed_postgres.go new file mode 100644 index 0000000000..67a321aecf --- /dev/null +++ b/internal/uiex/managed_postgres.go @@ -0,0 +1,377 @@ +package uiex + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "github.com/superfly/fly-go" + "github.com/superfly/flyctl/internal/config" +) + +type ManagedClusterIpAssignments struct { + Direct string `json:"direct"` +} + +type MPGRegion struct { + Code string `json:"code"` // e.g., "fra" + Available bool `json:"available"` // Whether this region supports MPG +} + +type ListMPGRegionsResponse struct { + Data []MPGRegion `json:"data"` +} + +type ManagedCluster struct { + Id string `json:"id"` + Name string `json:"name"` + Region string `json:"region"` + Status string `json:"status"` + Plan string `json:"plan"` + Disk int `json:"disk"` + Replicas int `json:"replicas"` + Organization fly.Organization `json:"organization"` + IpAssignments ManagedClusterIpAssignments `json:"ip_assignments"` +} + +type ListManagedClustersResponse struct { + Data []ManagedCluster `json:"data"` +} + +type GetManagedClusterCredentialsResponse struct { + Status string `json:"status"` + User string `json:"user"` + Password string `json:"password"` + DBName string `json:"dbname"` + ConnectionUri string `json:"pgbouncer_uri"` +} + +type GetManagedClusterResponse struct { + Data ManagedCluster `json:"data"` + Credentials GetManagedClusterCredentialsResponse `json:"credentials"` +} + +func (c *Client) ListManagedClusters(ctx context.Context, orgSlug string) (ListManagedClustersResponse, error) { + var response ListManagedClustersResponse + + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/postgres", c.baseUrl, orgSlug) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusOK: + if err = json.NewDecoder(res.Body).Decode(&response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + return response, nil + case http.StatusNotFound: + return response, err + default: + return response, err + } + +} + +func (c *Client) 
GetManagedCluster(ctx context.Context, orgSlug string, id string) (GetManagedClusterResponse, error) { + var response GetManagedClusterResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/postgres/%s", c.baseUrl, orgSlug, id) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusOK: + if err = json.NewDecoder(res.Body).Decode(&response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + return response, nil + case http.StatusNotFound: + return response, err + default: + return response, err + } +} + +func (c *Client) GetManagedClusterById(ctx context.Context, id string) (GetManagedClusterResponse, error) { + var response GetManagedClusterResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/postgres/%s", c.baseUrl, id) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusOK: + if err = json.NewDecoder(res.Body).Decode(&response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + + return response, nil + case http.StatusNotFound: + return response, fmt.Errorf("Cluster %s not found", id) + default: + return response, fmt.Errorf("Something went wrong") + } +} + +type CreateUserInput struct { + DbName string `json:"db_name"` + UserName string `json:"user_name"` +} + +type DetailedErrors struct { + Detail string `json:"detail"` +} + +type CreateUserResponse struct { + ConnectionUri string `json:"connection_uri"` + Ok bool `json:"ok"` + Errors DetailedErrors `json:"errors"` +} + +func (c *Client) CreateUser(ctx context.Context, id string, input CreateUserInput) (CreateUserResponse, error) { + var response CreateUserResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/postgres/%s/users", c.baseUrl, id) + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(input); err != nil { + return response, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, &buf) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + switch res.StatusCode { + case http.StatusCreated: + if err = json.NewDecoder(res.Body).Decode(&response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + + if !response.Ok { + if response.Errors.Detail != "" { + return response, fmt.Errorf("Failed to create user with error: %s", response.Errors.Detail) + } else { + return response, fmt.Errorf("Something went wrong creating user. 
Please try again") + } + } + + return response, nil + + default: + if err = json.NewDecoder(res.Body).Decode(&response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + + if response.Errors.Detail != "" { + return response, fmt.Errorf("Failed to create user with error: %s", response.Errors.Detail) + } + + return response, fmt.Errorf("Failed to create user with error: %s", response.Errors.Detail) + } +} + +type CreateClusterInput struct { + Name string `json:"name"` + Region string `json:"region"` + Plan string `json:"plan"` + OrgSlug string `json:"org_slug"` + Disk int `json:"disk"` + PostGISEnabled bool `json:"postgis_enabled"` +} + +type CreateClusterResponse struct { + Ok bool `json:"ok"` + Errors DetailedErrors `json:"errors"` + Data struct { + Id string `json:"id"` + Name string `json:"name"` + Status *string `json:"status"` + Plan string `json:"plan"` + Environment *string `json:"environment"` + Region string `json:"region"` + Organization fly.Organization `json:"organization"` + Replicas int `json:"replicas"` + Disk int `json:"disk"` + IpAssignments ManagedClusterIpAssignments `json:"ip_assignments"` + PostGISEnabled bool `json:"postgis_enabled"` + } `json:"data"` +} + +func (c *Client) CreateCluster(ctx context.Context, input CreateClusterInput) (CreateClusterResponse, error) { + var response CreateClusterResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/postgres", c.baseUrl, input.OrgSlug) + + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(input); err != nil { + return response, fmt.Errorf("failed to encode request body: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, &buf) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + // Read the response body to get error details + body, err := io.ReadAll(res.Body) + if err != nil { + return response, fmt.Errorf("failed to read response body: %w", err) + } + + switch res.StatusCode { + case http.StatusCreated: + if err = json.Unmarshal(body, &response); err != nil { + return response, fmt.Errorf("failed to decode response: %w", err) + } + return response, nil + case http.StatusNotFound: + return response, fmt.Errorf("organization %s not found", input.OrgSlug) + case http.StatusForbidden: + if err = json.Unmarshal(body, &response); err == nil { + if response.Errors.Detail != "" { + return response, errors.New(response.Errors.Detail) + } + } + + return response, fmt.Errorf("failed to create cluster (status %d): %s", res.StatusCode, string(body)) + case http.StatusInternalServerError: + return response, fmt.Errorf("server error: %s", string(body)) + default: + return response, fmt.Errorf("failed to create cluster (status %d): %s", res.StatusCode, string(body)) + } +} + +// ListMPGRegions returns the list of regions available for Managed Postgres +// TODO: Implement the actual API endpoint on the backend +func (c *Client) ListMPGRegions(ctx context.Context, orgSlug string) (ListMPGRegionsResponse, error) { + var response ListMPGRegionsResponse + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/postgres/regions", c.baseUrl, orgSlug) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, 
nil) + if err != nil { + return response, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return response, err + } + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + if err != nil { + return response, fmt.Errorf("failed to read response body: %w", err) + } + + switch res.StatusCode { + case http.StatusOK: + if err = json.Unmarshal(body, &response); err != nil { + return response, fmt.Errorf("failed to decode response, please try again: %w", err) + } + return response, nil + default: + return response, fmt.Errorf("failed to list MPG regions (status %d): %s", res.StatusCode, string(body)) + } + +} + +// DestroyCluster permanently destroys a managed Postgres cluster +func (c *Client) DestroyCluster(ctx context.Context, orgSlug string, id string) error { + cfg := config.FromContext(ctx) + url := fmt.Sprintf("%s/api/v1/organizations/%s/postgres/%s", c.baseUrl, orgSlug, id) + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Add("Authorization", "Bearer "+cfg.Tokens.GraphQL()) + req.Header.Add("Content-Type", "application/json") + + res, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + body, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("failed to read response body: %w", err) + } + + switch res.StatusCode { + case http.StatusOK, http.StatusNoContent: + return nil + case http.StatusNotFound: + return fmt.Errorf("cluster %s not found", id) + case http.StatusForbidden: + return fmt.Errorf("access denied: you don't have permission to destroy cluster %s", id) + default: + return fmt.Errorf("failed to destroy cluster (status %d): %s", res.StatusCode, string(body)) + } +} diff --git a/internal/uiexutil/client.go b/internal/uiexutil/client.go new file mode 100644 index 0000000000..27ac129132 --- /dev/null +++ b/internal/uiexutil/client.go @@ -0,0 +1,36 @@ +package uiexutil + +import ( + "context" + + "github.com/superfly/flyctl/internal/uiex" +) + +type Client interface { + // MPGs + ListMPGRegions(ctx context.Context, orgSlug string) (uiex.ListMPGRegionsResponse, error) + ListManagedClusters(ctx context.Context, orgSlug string) (uiex.ListManagedClustersResponse, error) + GetManagedCluster(ctx context.Context, orgSlug string, id string) (uiex.GetManagedClusterResponse, error) + GetManagedClusterById(ctx context.Context, id string) (uiex.GetManagedClusterResponse, error) + CreateUser(ctx context.Context, id string, input uiex.CreateUserInput) (uiex.CreateUserResponse, error) + CreateCluster(ctx context.Context, input uiex.CreateClusterInput) (uiex.CreateClusterResponse, error) + DestroyCluster(ctx context.Context, orgSlug string, id string) error + + // Builders + CreateFlyManagedBuilder(ctx context.Context, orgSlug string, region string) (uiex.CreateFlyManagedBuilderResponse, error) +} + +type contextKey struct{} + +var clientContextKey = &contextKey{} + +// NewContextWithClient derives a Context that carries c from ctx. +func NewContextWithClient(ctx context.Context, c Client) context.Context { + return context.WithValue(ctx, clientContextKey, c) +} + +// ClientFromContext returns the Client ctx carries. 
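+// It returns nil when no client has been attached to ctx, so callers that may
+// run before NewContextWithClient should check the result before using it.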
+func ClientFromContext(ctx context.Context) Client { + c, _ := ctx.Value(clientContextKey).(Client) + return c +} diff --git a/internal/uiexutil/uiexutil.go b/internal/uiexutil/uiexutil.go new file mode 100644 index 0000000000..06138e0b43 --- /dev/null +++ b/internal/uiexutil/uiexutil.go @@ -0,0 +1,20 @@ +package uiexutil + +import ( + "context" + + "github.com/superfly/flyctl/internal/config" + "github.com/superfly/flyctl/internal/logger" + "github.com/superfly/flyctl/internal/uiex" +) + +func NewClientWithOptions(ctx context.Context, opts uiex.NewClientOpts) (*uiex.Client, error) { + if opts.Tokens == nil { + opts.Tokens = config.Tokens(ctx) + } + + if v := logger.MaybeFromContext(ctx); v != nil && opts.Logger == nil { + opts.Logger = v + } + return uiex.NewWithOptions(ctx, opts) +} diff --git a/internal/version/version_test.go b/internal/version/version_test.go index 8eb8e1757e..408805a665 100644 --- a/internal/version/version_test.go +++ b/internal/version/version_test.go @@ -3,12 +3,12 @@ package version import ( "encoding/json" "math/rand" + "slices" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) func TestEncode(t *testing.T) { diff --git a/iostreams/color.go b/iostreams/color.go index d12949a658..4f7e953644 100644 --- a/iostreams/color.go +++ b/iostreams/color.go @@ -2,6 +2,8 @@ package iostreams import ( "fmt" + "github.com/samber/lo" + "math" "os" "strings" @@ -48,16 +50,28 @@ func Is256ColorSupported() bool { strings.Contains(colorterm, "truecolor") } -func NewColorScheme(enabled, is256enabled bool) *ColorScheme { +func IsTrueColor() bool { + term := os.Getenv("TERM") + colorterm := os.Getenv("COLORTERM") + + return strings.Contains(term, "24bit") || + strings.Contains(term, "truecolor") || + strings.Contains(colorterm, "24bit") || + strings.Contains(colorterm, "truecolor") +} + +func NewColorScheme(enabled, is256enabled, trueColor bool) *ColorScheme { return &ColorScheme{ enabled: enabled, is256enabled: is256enabled, + trueColor: trueColor, } } type ColorScheme struct { enabled bool is256enabled bool + trueColor bool } func (c *ColorScheme) Bold(t string) string { @@ -184,3 +198,24 @@ func (c *ColorScheme) ColorFromString(s string) func(string) string { return fn } + +// RedGreenGradient wraps a string in an ANSI red-green color gradient at a value between 0-1. 
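+// When truecolor is available it emits a 24-bit escape that shifts from red
+// through yellow to green as value moves from 0 to 1; otherwise it falls back
+// to the nearest entry in a named or 256-color palette.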
+func (c *ColorScheme) RedGreenGradient(s string, value float64) string { + if !c.enabled { + return s + } + value = lo.Clamp(value, 0, 1) + if c.trueColor { + return fmt.Sprintf("\x1b[38;2;%d;%d;%dm%s\x1b[0m", + int(math.Min(255, 2*255*(1-value))), + int(math.Min(255, 2*255*value)), + 0, + s, + ) + } + colors := []string{"red", "yellow+h", "green", "green+h", "green+bh"} + if c.is256enabled { + colors = []string{"196", "202", "208", "214", "220", "190", "154", "118", "82", "46"} + } + return ansi.Color(s, colors[int(float64(len(colors)-1)*value)]) +} diff --git a/iostreams/iostreams.go b/iostreams/iostreams.go index 27969c47f6..a5cc86e764 100644 --- a/iostreams/iostreams.go +++ b/iostreams/iostreams.go @@ -28,10 +28,11 @@ type IOStreams struct { ErrOut io.Writer // the original (non-colorable) output stream - originalOut io.Writer - colorEnabled bool - is256enabled bool - terminalTheme string + originalOut io.Writer + colorEnabled bool + is256enabled bool + isTrueColorEnabled bool + terminalTheme string progressIndicatorEnabled bool progressIndicator *spinner.Spinner @@ -59,6 +60,10 @@ func (s *IOStreams) ColorSupport256() bool { return s.is256enabled } +func (s *IOStreams) ColorSupportTrueColor() bool { + return s.isTrueColorEnabled +} + func (s *IOStreams) DetectTerminalTheme() string { if !s.ColorEnabled() { s.terminalTheme = "none" @@ -290,7 +295,7 @@ func (s *IOStreams) TerminalWidth() int { } func (s *IOStreams) ColorScheme() *ColorScheme { - return NewColorScheme(s.ColorEnabled(), s.ColorSupport256()) + return NewColorScheme(s.ColorEnabled(), s.ColorSupport256(), s.ColorSupportTrueColor()) } func (s *IOStreams) ReadUserFile(fn string) ([]byte, error) { @@ -371,13 +376,14 @@ func System() *IOStreams { pagerCommand := os.Getenv("PAGER") io := &IOStreams{ - In: os.Stdin, - originalOut: os.Stdout, - Out: colorableOut(os.Stdout), - ErrOut: colorable.NewColorable(os.Stderr), - colorEnabled: EnvColorForced() || (!EnvColorDisabled() && stdoutIsTTY), - is256enabled: Is256ColorSupported(), - pagerCommand: pagerCommand, + In: os.Stdin, + originalOut: os.Stdout, + Out: colorableOut(os.Stdout), + ErrOut: colorable.NewColorable(os.Stderr), + colorEnabled: EnvColorForced() || (!EnvColorDisabled() && stdoutIsTTY), + is256enabled: Is256ColorSupported(), + isTrueColorEnabled: IsTrueColor(), + pagerCommand: pagerCommand, } if stdoutIsTTY && stderrIsTTY { diff --git a/proxy/connect.go b/proxy/connect.go index 691a6fff19..472deaaf25 100644 --- a/proxy/connect.go +++ b/proxy/connect.go @@ -125,7 +125,7 @@ func NewServer(ctx context.Context, p *ConnectParams) (*Server, error) { } } - fmt.Fprintf(io.Out, "Proxying local port %s to remote %s\n", localPort, remoteAddr) + fmt.Fprintf(io.Out, "Proxying localhost:%s to remote %s\n", localPort, remoteAddr) return &Server{ Addr: remoteAddr, diff --git a/retry/retry.go b/retry/retry.go deleted file mode 100644 index 1b89a106c2..0000000000 --- a/retry/retry.go +++ /dev/null @@ -1,49 +0,0 @@ -package retry - -import ( - "context" - "time" - - "github.com/jpillora/backoff" -) - -// Retry attempts to execute the provided function up to 'attempts' times, -// respecting the context for cancellation and timeout -func Retry(ctx context.Context, fn func() error, attempts uint) (err error) { - for i := attempts; i > 0; i-- { - - if ctx.Err() != nil { - return ctx.Err() - } - - err = fn() - if err == nil { - return nil - } - } - - return err -} - -// Retry attempts to execute the provided function up to 'attempts' times with an -// exponential backoff strategy, respecting 
the context for cancellation and timeout -func RetryBackoff(ctx context.Context, fn func() error, attempts uint, backoffStrategy *backoff.Backoff) (err error) { - for i := attempts; i > 0; i-- { - if ctx.Err() != nil { - return ctx.Err() - } - - err = fn() - if err == nil { - return nil - } - - select { - case <-time.After(backoffStrategy.Duration()): - case <-ctx.Done(): - return ctx.Err() - } - } - - return err -} diff --git a/retry/retry_test.go b/retry/retry_test.go deleted file mode 100644 index c81af79183..0000000000 --- a/retry/retry_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package retry - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/jpillora/backoff" - "github.com/stretchr/testify/assert" -) - -var errFail = errors.New("fail") - -func TestRetry(t *testing.T) { - t.Parallel() - t.Run("testSuccess", testSuccess) - t.Run("testFail1", testFail1) - t.Run("testFail2", testFail2) - t.Run("testFailAll", testFailAll) - t.Run("testContextTimeout", testContextTimeout) // Added test - t.Run("testRetryBackoffContextTimeout", testRetryBackoff) // Test for RetryBackoff -} - -func testSuccess(t *testing.T) { - var count int - - fn := func() error { - count++ - return nil - } - - err := Retry(context.Background(), fn, 3) - assert.NoError(t, err) - assert.Equal(t, 1, count) -} - -func testFail1(t *testing.T) { - var count int - - fn := func() error { - count++ - if count == 1 { - return errors.New("1") - } - return nil - } - - err := Retry(context.Background(), fn, 3) - assert.NoError(t, err) - assert.Equal(t, 2, count) -} - -func testFail2(t *testing.T) { - var count int - - fn := func() error { - count++ - if count <= 2 { - return errFail - } - return nil - } - - err := Retry(context.Background(), fn, 3) - assert.NoError(t, err) - assert.Equal(t, 3, count) -} - -func testFailAll(t *testing.T) { - var count int - - fn := func() error { - count++ - return errFail - } - - err := Retry(context.Background(), fn, 3) - assert.ErrorIs(t, err, errFail) - assert.Equal(t, 3, count) -} - -func testContextTimeout(t *testing.T) { - var count int - - fn := func() error { - count++ - time.Sleep(50 * time.Millisecond) - return errFail - } - - timeoutDuration := 100 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) - defer cancel() - - startTime := time.Now() - - err := Retry(ctx, fn, 10) - elapsed := time.Since(startTime) - - assert.ErrorIs(t, err, context.DeadlineExceeded) - assert.LessOrEqual(t, count, 3) - assert.GreaterOrEqual(t, elapsed, timeoutDuration) -} - -func testRetryBackoff(t *testing.T) { - var count int - - fn := func() error { - count++ - time.Sleep(50 * time.Millisecond) - return errFail - } - - timeoutDuration := 200 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration) - defer cancel() - - b := &backoff.Backoff{ - Min: 10 * time.Millisecond, - Max: 50 * time.Millisecond, - Factor: 2, - } - - startTime := time.Now() - - err := RetryBackoff(ctx, fn, 10, b) - - elapsed := time.Since(startTime) - - assert.ErrorIs(t, err, context.DeadlineExceeded, "expected context deadline exceeded error") - - assert.LessOrEqual(t, count, 4, "count should not exceed the number of attempts before timeout") - - assert.GreaterOrEqual(t, elapsed, timeoutDuration, "elapsed time should be at least the timeout duration") - - t.Logf("RetryBackoff - Attempts made: %d, Elapsed time: %v", count, elapsed) -} diff --git a/scanner/jsFramework.go b/scanner/jsFramework.go index c414711f86..2897abfcc5 100644 --- 
a/scanner/jsFramework.go +++ b/scanner/jsFramework.go @@ -183,7 +183,7 @@ func configureJsFramework(sourceDir string, config *ScannerConfig) (*SourceInfo, srcInfo.ObjectStorageDesired = true } - // if prisma is used, provider is definative + // if prisma is used, provider is definitive if checksPass(sourceDir+"/prisma", dirContains("*.prisma", "provider")) { if checksPass(sourceDir+"/prisma", dirContains("*.prisma", "postgresql")) { srcInfo.DatabaseDesired = DatabaseKindPostgres @@ -191,6 +191,7 @@ func configureJsFramework(sourceDir string, config *ScannerConfig) (*SourceInfo, srcInfo.DatabaseDesired = DatabaseKindMySQL } else if checksPass(sourceDir+"/prisma", dirContains("*.prisma", "sqlite")) { srcInfo.DatabaseDesired = DatabaseKindSqlite + srcInfo.ObjectStorageDesired = true } } @@ -247,6 +248,8 @@ func configureJsFramework(sourceDir string, config *ScannerConfig) (*SourceInfo, srcInfo.Family = "Nust" } else if devdeps["nuxt"] != nil || deps["nuxt"] != nil { srcInfo.Family = "Nuxt" + } else if checksPass(sourceDir, fileExists("shopify.app.toml")) { + srcInfo.Family = "Shopify" } else if deps["remix"] != nil || deps["@remix-run/node"] != nil { srcInfo.Family = "Remix" } else if devdeps["@sveltejs/kit"] != nil { @@ -278,15 +281,30 @@ func JsFrameworkCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchP } } - // generate Dockerfile if it doesn't already exist + // add litestream if object storage is present and database is sqlite3 + if plan.ObjectStorage.Provider() != nil && srcInfo.DatabaseDesired == DatabaseKindSqlite { + flags = append(flags, "--litestream") + } + + // run dockerfile-node if Dockerfile doesn't already exist, or there is a database to be set up _, err = os.Stat("Dockerfile") - if errors.Is(err, fs.ErrNotExist) { + if errors.Is(err, fs.ErrNotExist) || srcInfo.DatabaseDesired == DatabaseKindSqlite || srcInfo.DatabaseDesired == DatabaseKindPostgres { var args []string + // add --skip flag if Dockerfile already exists + if err == nil { + flags = append([]string{"--skip"}, flags...) + } + _, err = os.Stat("node_modules") if errors.Is(err, fs.ErrNotExist) { // no existing node_modules directory: run package directly args = []string{"npx", "--yes", "@flydotio/dockerfile@latest"} + + // add additional flags from launch command + if len(flags) > 0 { + args = append(args, flags...) + } } else { // build command to install package using preferred package manager args = []string{"npm", "install", "@flydotio/dockerfile@latest", "--save-dev"} @@ -335,7 +353,15 @@ func JsFrameworkCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchP cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to install @flydotio/dockerfile: %w", err) + if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == 42 { + // generator exited with code 42, which means existing + // Dockerfile contains errors which will prevent deployment. + srcInfo.SkipDeploy = true + srcInfo.DeployDocs = "Correct the errors and run 'fly deploy' to deploy your app." + fmt.Println() + } else { + return fmt.Errorf("failed to install @flydotio/dockerfile: %w", err) + } } } @@ -383,12 +409,20 @@ func JsFrameworkCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchP // execute (via npx, bunx, or bun x) the docker module cmd := exec.Command(xcmdpath, args...) 
- cmd.Stdin = nil + cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to generate Dockerfile: %w", err) + if exitError, ok := err.(*exec.ExitError); ok && exitError.ExitCode() == 42 { + // generator exited with code 42, which means existing + // Dockerfile contains errors which will prevent deployment. + srcInfo.SkipDeploy = true + srcInfo.DeployDocs = "Correct the errors and run 'fly deploy' to deploy your app.\n" + fmt.Println() + } else { + return fmt.Errorf("failed to generate Dockerfile: %w", err) + } } } } @@ -412,13 +446,15 @@ func JsFrameworkCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchP srcInfo.Family = family // provide some advice - srcInfo.DeployDocs += fmt.Sprintf(` + if srcInfo.DeployDocs == "" { + srcInfo.DeployDocs = fmt.Sprintf(` If you need custom packages installed, or have problems with your deployment build, you may need to edit the Dockerfile for app-specific changes. If you need help, please post on https://community.fly.io. Now: run 'fly deploy' to deploy your %s app. `, srcInfo.Family) + } return nil } diff --git a/scanner/laravel.go b/scanner/laravel.go index 50f874d8fe..302c0deae7 100644 --- a/scanner/laravel.go +++ b/scanner/laravel.go @@ -269,7 +269,7 @@ var redisRegStr = "^[^#]*redis" // extractConnections detects the database connection of a laravel fly app // by checking the .env file in the project's base directory for connection keywords. -// This ignores commented out lines and prioritizes the first connection occurance over others. +// This ignores commented out lines and prioritizes the first connection occurrence over others. // // Returns three variables: // diff --git a/scanner/node.go b/scanner/node.go index e9c0dac1eb..9a8a8134ab 100644 --- a/scanner/node.go +++ b/scanner/node.go @@ -50,12 +50,12 @@ func configureNode(sourceDir string, config *ScannerConfig) (*SourceInfo, error) vars := make(map[string]interface{}) - var yarnVersion string = "latest" + var yarnVersion = "latest" // node-build requires a version, so either use the same version as install locally, // or default to an LTS version - var nodeLtsVersion string = "18.16.0" - var nodeVersion string = nodeLtsVersion + var nodeLtsVersion = "18.16.0" + var nodeVersion = nodeLtsVersion out, err := exec.Command("node", "-v").Output() diff --git a/scanner/phoenix.go b/scanner/phoenix.go index 6b5ffd2b78..a54876f1c5 100644 --- a/scanner/phoenix.go +++ b/scanner/phoenix.go @@ -1,10 +1,12 @@ package scanner import ( + "bufio" "fmt" "os" "os/exec" "path/filepath" + "strings" "github.com/pkg/errors" "github.com/superfly/flyctl/helpers" @@ -142,11 +144,16 @@ a Postgres database. s.ReleaseCmd = "/app/bin/migrate" } else if checksPass(sourceDir, dirContains("mix.exs", "ecto_sqlite3")) { s.DatabaseDesired = DatabaseKindSqlite + s.ObjectStorageDesired = true s.Env["DATABASE_PATH"] = "/mnt/name/name.db" s.Volumes = []Volume{ { - Source: "name", - Destination: "/mnt/name", + Source: "name", + Destination: "/mnt/name", + InitialSize: "1GB", + AutoExtendSizeThreshold: 80, + AutoExtendSizeIncrement: "1GB", + AutoExtendSizeLimit: "10GB", }, } } @@ -162,7 +169,7 @@ a Postgres database. 
return s, nil } -func PhoenixCallback(appName string, _ *SourceInfo, plan *plan.LaunchPlan, flags []string) error { +func PhoenixCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, flags []string) error { envEExPath := "rel/env.sh.eex" envEExContents := ` # configure node for distributed erlang with IPV6 support @@ -202,5 +209,191 @@ export RELEASE_NODE="${FLY_APP_NAME}-${FLY_IMAGE_REF##*-}@${FLY_PRIVATE_IP}" return err } } + + // add Litestream if object storage is present and database is sqlite3 + if plan.ObjectStorage.Provider() != nil && srcInfo.DatabaseDesired == DatabaseKindSqlite { + srcInfo.PostInitCallback = install_litestream + } + return nil } + +// Read the Dockerfile and insert the necessary commands to install Litestream +// and run the Litestream script as the entrypoint. Primary constraint: +// do no harm. If the Dockerfile is not in the expected format, do not modify it. +func install_litestream() error { + // Ensure config directory exists + if _, err := os.Stat("config"); os.IsNotExist(err) { + return nil + } + + // Open original Dockerfile + file, err := os.Open("Dockerfile") + if err != nil { + return err + } + defer file.Close() + + // Create temporary output + var lines []string + + // Variables to track state + workdir := "" + scanner := bufio.NewScanner(file) + insertedLitestreamInstall := false + foundEntrypoint := false + insertedEntrypoint := false + installedWget := false + copiedLitestream := false + + // Read line by line + for scanner.Scan() { + line := scanner.Text() + + // Insert litestream script as entrypoint + if strings.HasPrefix(strings.TrimSpace(line), "CMD ") && !insertedEntrypoint { + script := workdir + "/bin/litestream.sh" + + if foundEntrypoint { + if strings.Contains(line, "CMD [") { + // JSON array format: CMD ["cmd"] + line = strings.Replace(line, "CMD [", fmt.Sprintf("CMD [\"/bin/bash\", \"%s\",", script), 1) + insertedEntrypoint = true + } else if strings.Contains(line, "CMD \"") { + // Shell format with quotes: CMD "cmd" + line = strings.Replace(line, "CMD \"", fmt.Sprintf("CMD \"/bin/bash %s", script), 1) + insertedEntrypoint = true + } + } else { + lines = append(lines, "# Run litestream script as entrypoint") + lines = append(lines, fmt.Sprintf("ENTRYPOINT [\"/bin/bash\", \"%s\"]", script)) + lines = append(lines, "") + insertedEntrypoint = true + } + } + + // Add wget to install litestream + if strings.Contains(line, "build-essential") && !installedWget { + line = strings.Replace(line, "build-essential", "build-essential wget", 1) + installedWget = true + } + + // Copy litestream binary from build stage, and setup from source + if strings.HasPrefix(strings.TrimSpace(line), "USER ") && !copiedLitestream { + lines = append(lines, "# Copy Litestream binary from build stage") + lines = append(lines, "COPY --from=builder /usr/bin/litestream /usr/bin/litestream") + lines = append(lines, "COPY litestream.sh /app/bin/litestream.sh") + lines = append(lines, "COPY config/litestream.yml /etc/litestream.yml") + lines = append(lines, "") + copiedLitestream = true + } + + // Append original line + lines = append(lines, line) + + // Install litestream + if strings.Contains(line, "apt-get clean") && !insertedLitestreamInstall { + lines = append(lines, "") + lines = append(lines, "# Install litestream") + lines = append(lines, "ARG LITESTREAM_VERSION=0.3.13") + lines = append(lines, "RUN wget https://github.com/benbjohnson/litestream/releases/download/v${LITESTREAM_VERSION}/litestream-v${LITESTREAM_VERSION}-linux-amd64.deb \\") + lines 
= append(lines, " && dpkg -i litestream-v${LITESTREAM_VERSION}-linux-amd64.deb") + + insertedLitestreamInstall = true + } + + // Check for existing entrypoint + if strings.HasPrefix(strings.TrimSpace(line), "ENTRYPOINT ") { + foundEntrypoint = true + } + + // Track WORKDIR + if strings.HasPrefix(strings.TrimSpace(line), "WORKDIR ") { + workdir = strings.Split(strings.TrimSpace(line), " ")[1] + workdir = strings.Trim(workdir, "\"") + workdir = strings.TrimRight(workdir, "/") + } + } + + // Check for errors + if err := scanner.Err(); err != nil { + return err + } + + // If we didn't complete the insertion, return without writing to file + if !insertedLitestreamInstall || !insertedEntrypoint || !copiedLitestream { + fmt.Println("Failed to insert Litestream installation commands. Skipping Litestream installation.") + return nil + } else { + fmt.Fprintln(os.Stdout, "Updating Dockerfile to install Litestream") + } + + // Write dockerfile back to file + dockerfile, err := os.Create("Dockerfile") + if err != nil { + return err + } + defer dockerfile.Close() + + for _, line := range lines { + fmt.Fprintln(dockerfile, line) + } + + // Create litestream.sh + script, err := os.Create("litestream.sh") + if err != nil { + return bufio.ErrBadReadCount + } + defer script.Close() + + _, err = fmt.Fprint(script, strings.TrimSpace(` +#!/usr/bin/env bash +set -e + +# If db doesn't exist, try restoring from object storage +if [ ! -f "$DATABASE_PATH" ] && [ -n "$BUCKET_NAME" ]; then + litestream restore -if-replica-exists "$DATABASE_PATH" +fi + +# Migrate database +/app/bin/migrate + +# Launch application +if [ -n "$BUCKET_NAME" ]; then + litestream replicate -exec "${*}" +else + exec "${@}" +fi + `)) + + if err != nil { + return err + } + + // Create litestream.yml + config, err := os.Create("config/litestream.yml") + if err != nil { + return err + } + + defer config.Close() + + _, err = fmt.Fprint(config, strings.TrimSpace(strings.ReplaceAll(` +# This is the configuration file for litestream. 
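+# Values prefixed with $ are expanded from the environment by Litestream when
+# this configuration is loaded.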
+# +# For more details, see: https://litestream.io/reference/config/ +# +dbs: +- path: $DATABASE_PATH + replicas: + - type: s3 + endpoint: $AWS_ENDPOINT_URL_S3 + bucket: $BUCKET_NAME + path: litestream${DATABASE_PATH} + access-key-id: $AWS_ACCESS_KEY_ID + secret-access-key: $AWS_SECRET_ACCESS_KEY + region: $AWS_REGION +`, "\t", " "))) + + return err +} diff --git a/scanner/python.go b/scanner/python.go index e4099ea44d..ad7a28058a 100644 --- a/scanner/python.go +++ b/scanner/python.go @@ -116,6 +116,7 @@ func parsePyDep(dep string) string { dep = strings.ToLower(dep) dep = strings.Split(dep, ";")[0] dep = strings.Split(dep, " ")[0] + dep = strings.Split(dep, "[")[0] dep = strings.Split(dep, "==")[0] dep = strings.Split(dep, ">")[0] dep = strings.Split(dep, "<")[0] diff --git a/scanner/rails.go b/scanner/rails.go index 43554fe4a4..1d85f6d88b 100644 --- a/scanner/rails.go +++ b/scanner/rails.go @@ -15,7 +15,7 @@ import ( "github.com/superfly/flyctl/helpers" "github.com/superfly/flyctl/internal/command/launch/plan" "github.com/superfly/flyctl/internal/flyerr" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) var healthcheck_channel = make(chan string) @@ -34,7 +34,30 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error return nil, nil } + // find absolute pat to bundle, ruby executables + // see: https://tip.golang.org/doc/go1.19#os-exec-path var err error + bundle, err = exec.LookPath("bundle") + if err != nil { + if errors.Is(err, exec.ErrDot) { + bundle, err = filepath.Abs(bundle) + } + + if err != nil { + return nil, errors.Wrap(err, "failure finding bundle executable") + } + } + + ruby, err = exec.LookPath("ruby") + if err != nil { + if errors.Is(err, exec.ErrDot) { + ruby, err = filepath.Abs(ruby) + } + + if err != nil { + return nil, errors.Wrap(err, "failure finding ruby executable") + } + } s := &SourceInfo{ Family: "Rails", @@ -47,7 +70,40 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error // add ruby version - rubyVersion, _ := extractRubyVersion("Gemfile.lock", "Gemfile", ".ruby-version") + var rubyVersion string + + // add ruby version from Gemfile + gemfile, err := os.ReadFile("Gemfile") + if err == nil { + re := regexp.MustCompile(`(?m)^ruby\s+["'](\d+\.\d+\.\d+)["']`) + matches := re.FindStringSubmatch(string(gemfile)) + if len(matches) >= 2 { + rubyVersion = matches[1] + } + } + + if rubyVersion == "" { + // add ruby version from .ruby-version file + versionFile, err := os.ReadFile(".ruby-version") + if err == nil { + re := regexp.MustCompile(`ruby-(\d+\.\d+\.\d+)`) + matches := re.FindStringSubmatch(string(versionFile)) + if len(matches) >= 2 { + rubyVersion = matches[1] + } + } + } + + if rubyVersion == "" { + versionOutput, err := exec.Command("ruby", "--version").Output() + if err == nil { + re := regexp.MustCompile(`ruby (\d+\.\d+\.\d+)`) + matches := re.FindStringSubmatch(string(versionOutput)) + if len(matches) >= 2 { + rubyVersion = matches[1] + } + } + } if rubyVersion != "" { s.Runtime = plan.RuntimeStruct{Language: "ruby", Version: rubyVersion} @@ -65,10 +121,15 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error // postgresql s.DatabaseDesired = DatabaseKindPostgres s.SkipDatabase = false - } else { + } else if checksPass(sourceDir, dirContains("Dockerfile", "sqlite3")) { // sqlite s.DatabaseDesired = DatabaseKindSqlite s.SkipDatabase = true + s.ObjectStorageDesired = true + } else { + // no database + s.DatabaseDesired = DatabaseKindNone + s.SkipDatabase = true } // enable 
redis if there are any action cable / anycable channels @@ -147,7 +208,6 @@ func configureRails(sourceDir string, config *ScannerConfig) (*SourceInfo, error s.Port = port } } - s.Runtime.NoInstallRequired = true } // master.key comes with Rails apps from v5.2 onwards, but may not be present @@ -217,32 +277,14 @@ Once ready: run 'fly deploy' to deploy your Rails app. ` } - // find absolute pat to bundle, ruby executables - // see: https://tip.golang.org/doc/go1.19#os-exec-path - bundle, err = exec.LookPath("bundle") - if err != nil { - if errors.Is(err, exec.ErrDot) { - bundle, err = filepath.Abs(bundle) - } - - if err != nil { - return nil, errors.Wrap(err, "failure finding bundle executable") - } - } - - ruby, err = exec.LookPath("ruby") - if err != nil { - if errors.Is(err, exec.ErrDot) { - ruby, err = filepath.Abs(ruby) - } - + // fetch healthcheck route in a separate thread + go func() { + ruby, err := exec.LookPath("ruby") if err != nil { - return nil, errors.Wrap(err, "failure finding ruby executable") + healthcheck_channel <- "" + return } - } - // fetch healthcheck route in a separate thread - go func() { out, err := exec.Command(ruby, binrails, "runner", "puts Rails.application.routes.url_helpers.rails_health_check_path").Output() @@ -267,75 +309,71 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f // install dockerfile-rails gem, if not already included and the gem directory is writable // if an error occurrs, store it for later in pendingError - - var err error - var pendingError error generatorInstalled := false - if _, err := os.Stat("Dockerfile"); err != nil { - gemfile, err := os.ReadFile("Gemfile") - if err != nil { - return errors.Wrap(err, "Failed to read Gemfile") - } else if !strings.Contains(string(gemfile), "dockerfile-rails") { - // check for writable gem installation directory - writable := false - out, err := exec.Command("gem", "environment").Output() - if err == nil { - regexp := regexp.MustCompile(`INSTALLATION DIRECTORY: (.*)\n`) - for _, match := range regexp.FindAllStringSubmatch(string(out), -1) { - // Testing to see if a directory is writable is OS dependent, so - // we use a brute force method: attempt it and see if it works. - file, err := os.CreateTemp(match[1], ".flyctl.probe") - if err == nil { - writable = true - file.Close() - defer os.Remove(file.Name()) - } + var pendingError error + gemfile, err := os.ReadFile("Gemfile") + if err != nil { + return errors.Wrap(err, "Failed to read Gemfile") + } else if !strings.Contains(string(gemfile), "dockerfile-rails") { + // check for writable gem installation directory + writable := false + out, err := exec.Command("gem", "environment").Output() + if err == nil { + regexp := regexp.MustCompile(`INSTALLATION DIRECTORY: (.*)\n`) + for _, match := range regexp.FindAllStringSubmatch(string(out), -1) { + // Testing to see if a directory is writable is OS dependent, so + // we use a brute force method: attempt it and see if it works. 
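+ // If the probe file can be created, the directory is writable; the file is
+ // closed immediately and removed again when this callback returns.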
+ file, err := os.CreateTemp(match[1], ".flyctl.probe") + if err == nil { + writable = true + file.Close() + defer os.Remove(file.Name()) } } + } - // install dockerfile-rails gem if the gem installation directory is writable - if writable { - cmd := exec.Command(bundle, "add", "dockerfile-rails", - "--optimistic", "--group", "development", "--skip-install") - cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - pendingError = cmd.Run() - if pendingError != nil { - pendingError = errors.Wrap(pendingError, "Failed to add dockerfile-rails gem") - } else { - generatorInstalled = true - } + // install dockerfile-rails gem if the gem installation directory is writable + if writable { + cmd := exec.Command(bundle, "add", "dockerfile-rails", + "--optimistic", "--group", "development", "--skip-install") + cmd.Stdin = nil + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + pendingError = cmd.Run() + if pendingError != nil { + pendingError = errors.Wrap(pendingError, "Failed to add dockerfile-rails gem") + } else { + generatorInstalled = true } - } else { - // proceed using the already installed gem - generatorInstalled = true } + } else { + // proceed using the already installed gem + generatorInstalled = true + } - cmd := exec.Command(bundle, "install", "--quiet") - cmd.Stdin = nil - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr + cmd := exec.Command(bundle, "install", "--quiet") + cmd.Stdin = nil + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - return errors.Wrap(pendingError, "Failed to install bundle, exiting") - } + err = cmd.Run() + if err != nil { + return errors.Wrap(err, "Failed to install bundle, exiting") + } - // ensure Gemfile.lock includes the x86_64-linux platform - if out, err := exec.Command(bundle, "platform").Output(); err == nil { - if !strings.Contains(string(out), "x86_64-linux") { - cmd := exec.Command(bundle, "lock", "--add-platform", "x86_64-linux") - if err := cmd.Run(); err != nil { - return errors.Wrap(err, "Failed to add x86_64-linux platform, exiting") - } + // ensure Gemfile.lock includes the x86_64-linux platform + if out, err := exec.Command(bundle, "platform").Output(); err == nil { + if !strings.Contains(string(out), "x86_64-linux") { + cmd := exec.Command(bundle, "lock", "--add-platform", "x86_64-linux") + if err := cmd.Run(); err != nil { + return errors.Wrap(err, "Failed to add x86_64-linux platform, exiting") } } } // ensure fly.toml exists. If present, the rails dockerfile generator will - // add volumes, processes, release command and potentailly other configuration. + // add volumes, processes, release command and potentially other configuration. 
flyToml := "fly.toml" _, err = os.Stat(flyToml) if os.IsNotExist(err) { @@ -353,42 +391,47 @@ func RailsCallback(appName string, srcInfo *SourceInfo, plan *plan.LaunchPlan, f } } - // run command if the generator is available - if generatorInstalled { - // base generate command - args := []string{binrails, "generate", "dockerfile", - "--label=fly_launch_runtime:rails"} + // base generate command + args := []string{binrails, "generate", "dockerfile", + "--label=fly_launch_runtime:rails"} - // skip prompt to replace files if Dockerfile already exists - _, err = os.Stat("Dockerfile") - if !errors.Is(err, fs.ErrNotExist) { - args = append(args, "--skip") + // skip prompt to replace files if Dockerfile already exists + _, err = os.Stat("Dockerfile") + if !errors.Is(err, fs.ErrNotExist) { + args = append(args, "--skip") - if !generatorInstalled { - return errors.Wrap(pendingError, "No Dockerfile found") - } + if !generatorInstalled { + return errors.Wrap(pendingError, "No Dockerfile found") } + } - // add postgres - if plan.Postgres.Provider() != nil { - args = append(args, "--postgresql", "--no-prepare") - } + // add postgres + if plan.Postgres.Provider() != nil { + args = append(args, "--postgresql", "--no-prepare") + } - // add redis - if plan.Redis.Provider() != nil { - args = append(args, "--redis") - } + // add redis + if plan.Redis.Provider() != nil { + args = append(args, "--redis") + } - // add object storage - if plan.ObjectStorage.Provider() != nil { - args = append(args, "--tigris") - } + // add object storage + if plan.ObjectStorage.Provider() != nil { + args = append(args, "--tigris") - // add additional flags from launch command - if len(flags) > 0 { - args = append(args, flags...) + // add litestream if object storage is available and the database is sqlite + if srcInfo.DatabaseDesired == DatabaseKindSqlite { + args = append(args, "--litestream") } + } + + // add additional flags from launch command + if len(flags) > 0 { + args = append(args, flags...) + } + // run command if the generator is available + if generatorInstalled { fmt.Printf("Running: %s\n", strings.Join(args, " ")) cmd := exec.Command(ruby, args...) cmd.Stdin = os.Stdin @@ -419,7 +462,7 @@ The following comand can be used to update your Dockerfile: // read dockerfile dockerfile, err := os.ReadFile("Dockerfile") if err == nil { - if generatorInstalled && pendingError != nil { + if pendingError != nil { // generator may have failed, but Dockerfile was created - warn user fmt.Println("Error running dockerfile generator:", pendingError) } diff --git a/scanner/rust.go b/scanner/rust.go index 50b02499a6..4719c63898 100644 --- a/scanner/rust.go +++ b/scanner/rust.go @@ -1,6 +1,10 @@ package scanner -import "github.com/superfly/flyctl/internal/command/launch/plan" +import ( + "fmt" + + "github.com/superfly/flyctl/internal/command/launch/plan" +) func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { if !checksPass(sourceDir, fileExists("Cargo.toml", "Cargo.lock")) { @@ -12,7 +16,8 @@ func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { return nil, err } - deps := cargoData["dependencies"].(map[string]interface{}) + // Cargo.toml may not contain a "dependencies" section, so we don't return an error if it's missing. 
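+ // With the comma-ok assertion a missing section just leaves deps as a nil
+ // map, and nil map lookups below return nil, so no framework is matched.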
+ deps, _ := cargoData["dependencies"].(map[string]interface{}) family := "Rust" env := map[string]string{ "PORT": "8080", @@ -32,8 +37,16 @@ func configureRust(sourceDir string, _ *ScannerConfig) (*SourceInfo, error) { family = "Poem" } + pkg, ok := cargoData["package"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("file Cargo.toml does not contain a valid package section") + } + vars := make(map[string]interface{}) - vars["appName"] = cargoData["package"].(map[string]interface{})["name"].(string) + vars["appName"], ok = pkg["name"].(string) + if !ok { + return nil, fmt.Errorf("file Cargo.toml does not contain a valid package name") + } s := &SourceInfo{ Files: templatesExecute("templates/rust", vars), diff --git a/scanner/scanner.go b/scanner/scanner.go index 017255fff5..c9aac30e6f 100644 --- a/scanner/scanner.go +++ b/scanner/scanner.go @@ -52,6 +52,7 @@ type SourceInfo struct { BuildArgs map[string]string Builder string ReleaseCmd string + SeedCmd string DockerCommand string DockerEntrypoint string KillSignal string @@ -87,6 +88,7 @@ type SourceInfo struct { AutoInstrumentErrors bool FailureCallback func(err error) error Runtime plan.RuntimeStruct + PostInitCallback func() error } type SourceFile struct { diff --git a/scanner/templates/deno/Dockerfile b/scanner/templates/deno/Dockerfile index ab4b6e5b63..af724204fe 100644 --- a/scanner/templates/deno/Dockerfile +++ b/scanner/templates/deno/Dockerfile @@ -1,10 +1,10 @@ # Based on https://github.com/denoland/deno_docker/blob/main/alpine.dockerfile -ARG DENO_VERSION=2.0.4 +ARG DENO_VERSION=2.4.5 ARG BIN_IMAGE=denoland/deno:bin-${DENO_VERSION} FROM ${BIN_IMAGE} AS bin -FROM frolvlad/alpine-glibc:alpine-3.13 +FROM frolvlad/alpine-glibc:alpine-3.22 RUN apk --no-cache add ca-certificates diff --git a/scanner/templates/python/.dockerignore b/scanner/templates/python/.dockerignore index 0501d092d2..94e52f4dcc 100644 --- a/scanner/templates/python/.dockerignore +++ b/scanner/templates/python/.dockerignore @@ -1 +1,2 @@ fly.toml +.venv diff --git a/scripts/preflight.sh b/scripts/preflight.sh index abdbaf69b6..6c89ac6d60 100755 --- a/scripts/preflight.sh +++ b/scripts/preflight.sh @@ -47,7 +47,7 @@ gotesplit \ -total "$total" \ -index "$index" \ github.com/superfly/flyctl/test/preflight/... \ - -- --tags=integration -v -timeout=10m $test_opts | tee "$test_log" + -- --tags=integration -v -timeout=15m $test_opts | tee "$test_log" test_status=$? set -e diff --git a/scripts/publish_docs.sh b/scripts/publish_docs.sh index 338c794e28..688331e1f6 100755 --- a/scripts/publish_docs.sh +++ b/scripts/publish_docs.sh @@ -4,8 +4,8 @@ BRANCH=flyctl-docs_$1 scripts/generate_docs.sh docs/flyctl/cmd cd docs -git config --global user.email "joshua@fly.io" -git config --global user.name "Fly.io CI" +git config --global user.name 'docs-syncer[bot]' +git config --global user.email '134718678+docs-syncer[bot]@users.noreply.github.com' git checkout -b $BRANCH git add flyctl/cmd git diff --cached --quiet @@ -13,6 +13,6 @@ git diff --cached --quiet if [ $? 
-gt 0 ]; then git commit -a -m "[flyctl-bot] Update docs from flyctl" git push -f --set-upstream origin HEAD:$BRANCH - gh pr create -t "[flybot] Fly CLI docs update" -b "Fly CLI docs update" -B main -H $BRANCH -r jsierles + gh pr create -t "[flybot] Fly CLI docs update" -b "Fly CLI docs update" -B main -H $BRANCH gh pr merge --delete-branch --squash fi diff --git a/ssh/client.go b/ssh/client.go index 4df9222111..82ec177081 100644 --- a/ssh/client.go +++ b/ssh/client.go @@ -104,7 +104,7 @@ func (c *Client) Connect(ctx context.Context) error { } } -func (c *Client) Shell(ctx context.Context, sessIO *SessionIO, cmd string) error { +func (c *Client) Shell(ctx context.Context, sessIO *SessionIO, cmd string, container string) error { if c.Client == nil { if err := c.Connect(ctx); err != nil { return err @@ -112,9 +112,18 @@ func (c *Client) Shell(ctx context.Context, sessIO *SessionIO, cmd string) error } sess, err := c.Client.NewSession() + if err != nil { return err } + + if container != "" { + err = sess.Setenv("FLY_SSH_CONTAINER", container) + if err != nil { + return err + } + } + defer sess.Close() return sessIO.attach(ctx, sess, cmd) diff --git a/test/fixtures/bun-basic/index.ts b/test/fixtures/bun-basic/index.ts index 1daad102b5..d99cb6ea04 100644 --- a/test/fixtures/bun-basic/index.ts +++ b/test/fixtures/bun-basic/index.ts @@ -2,4 +2,4 @@ Bun.serve({ fetch(req) { return new Response("Hello, Bun!"); }, - }); \ No newline at end of file + }); diff --git a/test/fixtures/bun-basic/package.json b/test/fixtures/bun-basic/package.json index f0076f2e3f..878c4aa9d3 100644 --- a/test/fixtures/bun-basic/package.json +++ b/test/fixtures/bun-basic/package.json @@ -8,4 +8,4 @@ "peerDependencies": { "typescript": "^5.0.0" } -} \ No newline at end of file +} diff --git a/test/fixtures/deno-no-config/index.ts b/test/fixtures/deno-no-config/index.ts index 7f3a0fb466..a663eb0096 100644 --- a/test/fixtures/deno-no-config/index.ts +++ b/test/fixtures/deno-no-config/index.ts @@ -1,3 +1,3 @@ Deno.serve({ port: 8080, hostname: "0.0.0.0" }, (_req) => { return new Response("Hello, World!"); -}); \ No newline at end of file +}); diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore index ed6f936b80..5381f931ae 100644 --- a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/.gitignore @@ -33,4 +33,4 @@ hello_elixir-*.tar npm-debug.log /assets/node_modules/ -hello_elixir_dev.db* \ No newline at end of file +hello_elixir_dev.db* diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md index 20a1ae1d48..c1eec36815 100644 --- a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/README.md @@ -4,7 +4,7 @@ Welcome to our Code Server for Phoenix Apps. ## Development -Right now this editor is running at ${FLY_CODE_URL}. +Right now this editor is running at ${FLY_CODE_URL}. You need to start the development server to see yout app running at ${FLY_DEVELOPMENT_URL}. @@ -16,9 +16,9 @@ mix phx.server Looks like we're ready to deploy! -To deploy you just need to run `fly launch --no-deploy`, create your secret key and create a volume. +To deploy you just need to run `fly launch --no-deploy`, create your secret key and create a volume. 
-Run `fly launch --no-deploy` and make sure to say yes to copy the configuration file +Run `fly launch --no-deploy` and make sure to say yes to copy the configuration file to the new app so you wont have to do anything. ```sh @@ -41,15 +41,15 @@ Wrote config file fly.toml Your app is ready. Deploy with `flyctl deploy` ``` -Let's got create your secret key. Elixir has a mix task that can generate a new +Let's got create your secret key. Elixir has a mix task that can generate a new Phoenix key base secret. Let's use that. ```bash mix phx.gen.secret ``` -It generates a long string of random text. Let's store that as a secret for our app. -When we run this command in our project folder, `flyctl` uses the `fly.toml` +It generates a long string of random text. Let's store that as a secret for our app. +When we run this command in our project folder, `flyctl` uses the `fly.toml` file to know which app we are setting the value on. ```sh @@ -79,4 +79,4 @@ Now go for the deploy! $ fly deploy ``` -... will bring up your app! \ No newline at end of file +... will bring up your app! diff --git a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh index d4def0a0b8..0019747823 100755 --- a/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh +++ b/test/fixtures/deploy-phoenix-sqlite-custom-tool-versions/entrypoint.sh @@ -6,4 +6,4 @@ if [ ! -f /data/prod.db ]; then fi /app/entry eval HelloElixir.Release.migrate && \ - /app/entry start \ No newline at end of file + /app/entry start diff --git a/test/fixtures/deploy-phoenix-sqlite/.gitignore b/test/fixtures/deploy-phoenix-sqlite/.gitignore index 84ddde453e..b94f616275 100644 --- a/test/fixtures/deploy-phoenix-sqlite/.gitignore +++ b/test/fixtures/deploy-phoenix-sqlite/.gitignore @@ -38,4 +38,3 @@ npm-debug.log # Database files *.db *.db-* - diff --git a/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js b/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js index d5e278afe5..2c24f91685 100644 --- a/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js +++ b/test/fixtures/deploy-phoenix-sqlite/assets/js/app.js @@ -41,4 +41,3 @@ liveSocket.connect() // >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session // >> liveSocket.disableLatencySim() window.liveSocket = liveSocket - diff --git a/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc b/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc index 9b5b435f7b..c15df2735b 100644 --- a/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc +++ b/test/fixtures/deploy-rails-7.0/config/credentials.yml.enc @@ -1 +1 @@ -hbsGC34xuHLHN4/dtXIQ3sRB/Rrr+DTA1aHx2G6MWzLS7gvH1MHCkwsjcIdTJxRVhlGGP3zxlUxoNDTjfsfoEYQlCtN2v4N10yYUsAYA86xpEk9LkRaU9uOsBanRd6e6mAhxUoA1JwR4N7gQwovNNOScenwRpS2DVP6slF908t1tV19lcx8m3DRKUFHjYIVjrDo36+PccLcUa25CY20qZOFPBGCXtF3thabUlQsNuJrscuatPH3hfnGt6cUQiXIRjvaVJFo8oZ7EhosE9TD/XhuG0QlL4VNbqyNEOTvHhV4P5iIN55p94iluA7PfPFTuPL0Z8Pg64fpF1cuyt5BFt3mnEtja9uN/HH7fYPOS8BnPwgPYZKEtbj4jGk3iIqSanp8Yvxvx+Dme/w1bY7kqaxNBGcYx0Bx5/UTH--HzmIYSvtUBvYeVKm--/QV0gNf0jH389bNku/NBEA== \ No newline at end of file 
+hbsGC34xuHLHN4/dtXIQ3sRB/Rrr+DTA1aHx2G6MWzLS7gvH1MHCkwsjcIdTJxRVhlGGP3zxlUxoNDTjfsfoEYQlCtN2v4N10yYUsAYA86xpEk9LkRaU9uOsBanRd6e6mAhxUoA1JwR4N7gQwovNNOScenwRpS2DVP6slF908t1tV19lcx8m3DRKUFHjYIVjrDo36+PccLcUa25CY20qZOFPBGCXtF3thabUlQsNuJrscuatPH3hfnGt6cUQiXIRjvaVJFo8oZ7EhosE9TD/XhuG0QlL4VNbqyNEOTvHhV4P5iIN55p94iluA7PfPFTuPL0Z8Pg64fpF1cuyt5BFt3mnEtja9uN/HH7fYPOS8BnPwgPYZKEtbj4jGk3iIqSanp8Yvxvx+Dme/w1bY7kqaxNBGcYx0Bx5/UTH--HzmIYSvtUBvYeVKm--/QV0gNf0jH389bNku/NBEA== diff --git a/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc b/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc index ecff7731dd..2829e6667b 100644 --- a/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc +++ b/test/fixtures/deploy-rails-7.2/config/credentials.yml.enc @@ -1 +1 @@ -Gkhk/4gkbLdSUZ+giBYulckjb5oo6CuR7BOEq/V9PPKMafhLLxxdky4lr1/lr4+hRIk4BjfcEGrxBwtMBUqewcIqRPg5WLQX1ti18Fzlp4LNNAkBwTK5t0dYl0/A/DbtJYlwrcazIG97mZIDubmr1cp2DAjk8gonRAmBGH9kuzod4ZFJGU+zewEU6i5elSrT0mins1tw2xlwS9MkY0DEgaXqmVvU0I6eQt5m2qpohA71kIgzAlwRaBSYIoBKpjHDMGQPemXAzl1AB0u6/x2w99ZnVRb7Pyk6RHNIctdtHXGpO0cWq4EYUv+UIzj4hWajDifZCErvxvgn7rwTiD83hIwhjguuVq+0MC+qkA900fRTUY60/tx8zeZe3JNnrQsXuklilg0MyFi0iiVK2FKskhrSTlMs--221rlrI+z1Au1fMN--+uSIAByfvLaNhpKuDyWaBQ== \ No newline at end of file +Gkhk/4gkbLdSUZ+giBYulckjb5oo6CuR7BOEq/V9PPKMafhLLxxdky4lr1/lr4+hRIk4BjfcEGrxBwtMBUqewcIqRPg5WLQX1ti18Fzlp4LNNAkBwTK5t0dYl0/A/DbtJYlwrcazIG97mZIDubmr1cp2DAjk8gonRAmBGH9kuzod4ZFJGU+zewEU6i5elSrT0mins1tw2xlwS9MkY0DEgaXqmVvU0I6eQt5m2qpohA71kIgzAlwRaBSYIoBKpjHDMGQPemXAzl1AB0u6/x2w99ZnVRb7Pyk6RHNIctdtHXGpO0cWq4EYUv+UIzj4hWajDifZCErvxvgn7rwTiD83hIwhjguuVq+0MC+qkA900fRTUY60/tx8zeZe3JNnrQsXuklilg0MyFi0iiVK2FKskhrSTlMs--221rlrI+z1Au1fMN--+uSIAByfvLaNhpKuDyWaBQ== diff --git a/test/fixtures/deploy-rails-8/config/credentials.yml.enc b/test/fixtures/deploy-rails-8/config/credentials.yml.enc index 3b76547d41..4777b68451 100644 --- a/test/fixtures/deploy-rails-8/config/credentials.yml.enc +++ b/test/fixtures/deploy-rails-8/config/credentials.yml.enc @@ -1 +1 @@ -tNT02O8Xba+tAfED29z+BDnG9NgzhlZvQxyMHNJGaLeugvHvfSG5RxO5KmvU7rNE0rWT8F078rP2qtZdxfZs1TH6wmYN4D0VH7CoNYEIz5ddbPDrXR/8689eCLKJlaSHxof/toVhqofCNdBF4l8dlQLWgXaytjpX0YnX/ZVVLo4NaPcH1roAySEJ9ECzy83FiDporIz7LgRJRbCQbj5hBFvHGVevFD3/xOR1d9eTUgT+iBhRBUgT8vb+rkAWPtUIbi2p2KILglrq9D7YMlokWYRUvvuUKS7fMCVPk8josz2L1EhVKd2X+YIAszH2eZKfltYiyfDx3+waLDJVe3dNPoz4scjp5jX0QfB/cxm25w5iuSwmSlJcZKMypO5tZ7pOQ2Rd0vsKo2QrrvI6SGKcIErYCF34HEMIe3aj8WJUB7gsSQXRRcWwwHtKUXsgAuuHRSlUq3EAtqNaoRZdSXBB6zvUa2OIksCPptDYkEcbeH7jnL+eaC+zgjuX--GGzRYapcAGQ8eCga--68JXDOlpBsbTpFUYNJud0g== \ No newline at end of file +tNT02O8Xba+tAfED29z+BDnG9NgzhlZvQxyMHNJGaLeugvHvfSG5RxO5KmvU7rNE0rWT8F078rP2qtZdxfZs1TH6wmYN4D0VH7CoNYEIz5ddbPDrXR/8689eCLKJlaSHxof/toVhqofCNdBF4l8dlQLWgXaytjpX0YnX/ZVVLo4NaPcH1roAySEJ9ECzy83FiDporIz7LgRJRbCQbj5hBFvHGVevFD3/xOR1d9eTUgT+iBhRBUgT8vb+rkAWPtUIbi2p2KILglrq9D7YMlokWYRUvvuUKS7fMCVPk8josz2L1EhVKd2X+YIAszH2eZKfltYiyfDx3+waLDJVe3dNPoz4scjp5jX0QfB/cxm25w5iuSwmSlJcZKMypO5tZ7pOQ2Rd0vsKo2QrrvI6SGKcIErYCF34HEMIe3aj8WJUB7gsSQXRRcWwwHtKUXsgAuuHRSlUq3EAtqNaoRZdSXBB6zvUa2OIksCPptDYkEcbeH7jnL+eaC+zgjuX--GGzRYapcAGQ8eCga--68JXDOlpBsbTpFUYNJud0g== diff --git a/test/fixtures/django-basic/mysite/urls.py b/test/fixtures/django-basic/mysite/urls.py index dc087204dd..efc54c7d39 100644 --- a/test/fixtures/django-basic/mysite/urls.py +++ b/test/fixtures/django-basic/mysite/urls.py @@ -4,4 +4,4 @@ urlpatterns = [ path("polls/", include("polls.urls")), path("admin/", admin.site.urls), -] \ No newline at end of file +] diff --git 
a/test/fixtures/django-basic/polls/urls.py b/test/fixtures/django-basic/polls/urls.py index a9d7f56e7e..5119061b3e 100644 --- a/test/fixtures/django-basic/polls/urls.py +++ b/test/fixtures/django-basic/polls/urls.py @@ -4,4 +4,4 @@ urlpatterns = [ path("", views.index, name="index"), -] \ No newline at end of file +] diff --git a/test/fixtures/django-basic/polls/views.py b/test/fixtures/django-basic/polls/views.py index 94bc318991..963b6f7088 100644 --- a/test/fixtures/django-basic/polls/views.py +++ b/test/fixtures/django-basic/polls/views.py @@ -2,4 +2,4 @@ def index(request): - return HttpResponse("Hello, world. You're at the polls index.") \ No newline at end of file + return HttpResponse("Hello, world. You're at the polls index.") diff --git a/test/fixtures/static/index.html b/test/fixtures/static/index.html index 14612b0114..2e387fc5f5 100644 --- a/test/fixtures/static/index.html +++ b/test/fixtures/static/index.html @@ -3,4 +3,4 @@ Hello World - \ No newline at end of file + diff --git a/test/preflight/fixtures/example-buildpack/Gemfile b/test/preflight/fixtures/example-buildpack/Gemfile index 44d0579008..d2bc22062c 100644 --- a/test/preflight/fixtures/example-buildpack/Gemfile +++ b/test/preflight/fixtures/example-buildpack/Gemfile @@ -11,4 +11,4 @@ gem "rack" # https://bugs.ruby-lang.org/issues/17303 gem "puma" -gem "sinatra" +gem "sinatra", "~> 4.1" diff --git a/test/preflight/fixtures/example-buildpack/Gemfile.lock b/test/preflight/fixtures/example-buildpack/Gemfile.lock index 37ac3db47c..ed488e4431 100644 --- a/test/preflight/fixtures/example-buildpack/Gemfile.lock +++ b/test/preflight/fixtures/example-buildpack/Gemfile.lock @@ -1,19 +1,28 @@ GEM remote: https://rubygems.org/ specs: - mustermann (2.0.2) + base64 (0.3.0) + logger (1.7.0) + mustermann (3.0.4) ruby2_keywords (~> 0.0.1) nio4r (2.7.4) puma (6.5.0) nio4r (~> 2.0) - rack (2.2.8.1) - rack-protection (2.2.3) - rack + rack (3.2.1) + rack-protection (4.1.1) + base64 (>= 0.1.0) + logger (>= 1.6.0) + rack (>= 3.0.0, < 4) + rack-session (2.1.1) + base64 (>= 0.1.0) + rack (>= 3.0.0) ruby2_keywords (0.0.5) - sinatra (2.2.3) - mustermann (~> 2.0) - rack (~> 2.2) - rack-protection (= 2.2.3) + sinatra (4.1.1) + logger (>= 1.6.0) + mustermann (~> 3.0) + rack (>= 3.0.0, < 4) + rack-protection (= 4.1.1) + rack-session (>= 2.0.0, < 3) tilt (~> 2.0) tilt (2.1.0) @@ -23,7 +32,7 @@ PLATFORMS DEPENDENCIES puma rack - sinatra + sinatra (~> 4.1) BUNDLED WITH - 2.0.2 + 2.6.1 diff --git a/test/preflight/fly_console_test.go b/test/preflight/fly_console_test.go index 8203ceb272..706515d2df 100644 --- a/test/preflight/fly_console_test.go +++ b/test/preflight/fly_console_test.go @@ -4,6 +4,8 @@ package preflight import ( + "os" + "path/filepath" "testing" "time" @@ -17,28 +19,48 @@ func TestFlyConsole(t *testing.T) { appName := f.CreateRandomAppMachines() targetOutput := "console test in " + appName + // The image is based on Debian bookworm. f.WriteFlyToml(` app = "%s" +primary_region = "%s" console_command = "/bin/echo '%s'" [build] - image = "nginx" + image = "nginx:1.29-bookworm" [processes] app = "/bin/sleep inf" `, - appName, targetOutput, + appName, f.PrimaryRegion(), targetOutput, ) f.Fly("deploy --ha=false") - result := f.Fly("console") - output := result.StdOutString() - require.Contains(f, output, targetOutput) + t.Run("console_command", func(t *testing.T) { + result := f.Fly("console") + output := result.StdOutString() + require.Contains(f, output, targetOutput) + }) - // Give time for the machine to be destroyed. 
- require.EventuallyWithT(t, func(c *assert.CollectT) { + t.Run("dockerfile", func(t *testing.T) { + dockerfile := filepath.Join(t.TempDir(), "dockerfile") + err := os.WriteFile(dockerfile, []byte(` +FROM alpine:latest +CMD ["/bin/sleep", "inf"] +`), 0644) + require.NoError(t, err) + + result := f.Fly("console -a %s --dockerfile %s", appName, dockerfile) + assert.Contains(t, result.StdOutString(), targetOutput, "console_command is still used") + + // Because of the dockerfile, the image here is Alpine. + result = f.Fly("console -a %s --dockerfile %s --command 'cat /etc/os-release'", appName, dockerfile) + assert.Contains(t, result.StdOutString(), "ID=alpine") + }) + + // All the tests above make ephemeral machines. They should be gone eventually. + assert.EventuallyWithT(t, func(t *assert.CollectT) { ml := f.MachinesList(appName) - assert.Equal(c, 1, len(ml)) - }, 10*time.Second, 1*time.Second) + assert.Equal(t, 1, len(ml)) + }, 10*time.Second, 1*time.Second, "machines are ephemeral and eventually gone") } diff --git a/test/preflight/fly_deploy_test.go b/test/preflight/fly_deploy_test.go index 5cc39b1b8f..744270ff67 100644 --- a/test/preflight/fly_deploy_test.go +++ b/test/preflight/fly_deploy_test.go @@ -110,7 +110,7 @@ func TestFlyDeploy_DeployToken_FailingSmokeCheck(t *testing.T) { [experimental] entrypoint = "/bin/false" ` - f.WriteFlyToml(appConfig) + f.WriteFlyToml("%s", appConfig) f.OverrideAuthAccessToken(f.Fly("tokens deploy").StdOutString()) deployRes := f.FlyAllowExitFailure("deploy") output := deployRes.StdErrString() @@ -128,7 +128,7 @@ func TestFlyDeploy_DeployToken_FailingReleaseCommand(t *testing.T) { [deploy] release_command = "/bin/false" ` - f.WriteFlyToml(appConfig) + f.WriteFlyToml("%s", appConfig) f.OverrideAuthAccessToken(f.Fly("tokens deploy").StdOut().String()) deployRes := f.FlyAllowExitFailure("deploy") output := deployRes.StdErrString() @@ -319,7 +319,7 @@ func TestFlyDeploy_DeployMachinesCheck(t *testing.T) { entrypoint = ["/bin/sh", "-c"] command = ["curl http://[$FLY_TEST_MACHINE_IP]:80"] ` - f.WriteFlyToml(appConfig) + f.WriteFlyToml("%s", appConfig) f.OverrideAuthAccessToken(f.Fly("tokens deploy").StdOut().String()) deployRes := f.Fly("deploy") output := deployRes.StdOutString() @@ -337,7 +337,7 @@ func TestFlyDeploy_NoServiceDeployMachinesCheck(t *testing.T) { entrypoint = ["/bin/sh", "-c"] command = ["curl http://[$FLY_TEST_MACHINE_IP]:80"] ` - f.WriteFlyToml(appConfig) + f.WriteFlyToml("%s", appConfig) f.OverrideAuthAccessToken(f.Fly("tokens deploy").StdOut().String()) deployRes := f.Fly("deploy") output := deployRes.StdOutString() @@ -355,7 +355,7 @@ func TestFlyDeploy_DeployMachinesCheckCanary(t *testing.T) { entrypoint = ["/bin/sh", "-c"] command = ["curl http://[$FLY_TEST_MACHINE_IP]:80"] ` - f.WriteFlyToml(appConfig) + f.WriteFlyToml("%s", appConfig) f.OverrideAuthAccessToken(f.Fly("tokens deploy").StdOut().String()) deployRes := f.Fly("deploy") output := deployRes.StdOutString() diff --git a/test/preflight/fly_postgres_test.go b/test/preflight/fly_postgres_test.go index 5a346a0377..f042f8cb61 100644 --- a/test/preflight/fly_postgres_test.go +++ b/test/preflight/fly_postgres_test.go @@ -167,14 +167,14 @@ func assertMachineCount(tb assert.TestingT, f *testlib.FlyctlTestEnv, appName st // assertPostgresIsUp checks that the given Postgres server is really up. // Even after "fly pg create", sometimes the server is not ready for accepting connections. 
-func assertPostgresIsUp(tb testing.TB, f *testlib.FlyctlTestEnv, appName string) { - tb.Helper() - +func assertPostgresIsUp(tb assert.TestingT, f *testlib.FlyctlTestEnv, appName string) { ssh := f.FlyAllowExitFailure(`ssh console -a %s -u postgres -C "psql -p 5433 -h /run/postgresql -c 'SELECT 1'"`, appName) assert.Equal(tb, 0, ssh.ExitCode(), "failed to connect to postgres at %s: %s", appName, ssh.StdErr()) } func TestPostgres_ImportSuccess(t *testing.T) { + t.Skip() + f := testlib.NewTestEnvFromEnv(t) // Since this explicitly sets a size, no need to test on GPUs/alternate @@ -194,7 +194,7 @@ func TestPostgres_ImportSuccess(t *testing.T) { "pg create --org %s --name %s --region %s --initial-cluster-size 1 --vm-size %s --volume-size 1", f.OrgSlug(), secondAppName, f.PrimaryRegion(), postgresMachineSize, ) - assert.EventuallyWithT(t, func(c *assert.CollectT) { + assert.EventuallyWithT(t, func(t *assert.CollectT) { assertPostgresIsUp(t, f, firstAppName) }, 1*time.Minute, 10*time.Second) @@ -220,12 +220,14 @@ func TestPostgres_ImportSuccess(t *testing.T) { require.Contains(f, output, firstAppName) // Wait for the importer machine to be destroyed. - assert.EventuallyWithT(t, func(c *assert.CollectT) { + assert.EventuallyWithT(t, func(t *assert.CollectT) { assertMachineCount(t, f, secondAppName, 1) }, 2*time.Minute, 10*time.Second, "import machine not destroyed") } func TestPostgres_ImportFailure(t *testing.T) { + t.Skip() + f := testlib.NewTestEnvFromEnv(t) // Since this explicitly sets a size, no need to test on GPUs/alternate @@ -240,7 +242,7 @@ func TestPostgres_ImportFailure(t *testing.T) { "pg create --org %s --name %s --region %s --initial-cluster-size 1 --vm-size %s --volume-size 1 --password x", f.OrgSlug(), appName, f.PrimaryRegion(), postgresMachineSize, ) - assert.EventuallyWithT(t, func(c *assert.CollectT) { + assert.EventuallyWithT(t, func(t *assert.CollectT) { assertPostgresIsUp(t, f, appName) }, 1*time.Minute, 10*time.Second) @@ -252,7 +254,7 @@ func TestPostgres_ImportFailure(t *testing.T) { require.Contains(f, result.StdOut().String(), "database \"test\" does not exist") // Wait for the importer machine to be destroyed. - assert.EventuallyWithT(t, func(c *assert.CollectT) { + assert.EventuallyWithT(t, func(t *assert.CollectT) { assertMachineCount(t, f, appName, 1) }, 1*time.Minute, 10*time.Second, "import machine not destroyed") } diff --git a/test/preflight/fly_scale_test.go b/test/preflight/fly_scale_test.go index e9fe157133..912842c853 100644 --- a/test/preflight/fly_scale_test.go +++ b/test/preflight/fly_scale_test.go @@ -79,7 +79,7 @@ destination = "/data" ` } - f.WriteFlyToml(config) + f.WriteFlyToml("%s", config) f.Fly("deploy --ha=false") assertMachineCount(t, f, appName, 1) diff --git a/test/preflight/fly_volume_test.go b/test/preflight/fly_volume_test.go index 4e3ef8b177..496e817bed 100644 --- a/test/preflight/fly_volume_test.go +++ b/test/preflight/fly_volume_test.go @@ -97,29 +97,29 @@ func testVolumeLs(t *testing.T) { f.Fly("vol destroy -y %s", destroyed.ID) // Deleted volumes shouldn't be shown. 
- assert.EventuallyWithT(f, func(c *assert.CollectT) { + assert.EventuallyWithT(f, func(t *assert.CollectT) { lsRes := f.Fly("vol ls -a %s --json", appName) var ls []*fly.Volume lsRes.StdOutJSON(&ls) - assert.Lenf(f, ls, 1, "volume %s is still visible", destroyed.ID) - assert.Equal(f, kept.ID, ls[0].ID) + assert.Lenf(t, ls, 1, "volume %s is still visible", destroyed.ID) + assert.Equal(t, kept.ID, ls[0].ID) }, 5*time.Minute, 10*time.Second) // Deleted volumes should be shown with --all. - assert.EventuallyWithT(f, func(c *assert.CollectT) { + assert.EventuallyWithT(f, func(t *assert.CollectT) { lsAllRes := f.Fly("vol ls --all -a %s --json", appName) var lsAll []*fly.Volume lsAllRes.StdOutJSON(&lsAll) - assert.Len(f, lsAll, 2) + assert.Len(t, lsAll, 2) var lsAllIds []string for _, v := range lsAll { lsAllIds = append(lsAllIds, v.ID) } - assert.Contains(f, lsAllIds, kept.ID) - assert.Contains(f, lsAllIds, destroyed.ID) + assert.Contains(t, lsAllIds, kept.ID) + assert.Contains(t, lsAllIds, destroyed.ID) }, 5*time.Minute, 10*time.Second) } @@ -182,10 +182,9 @@ func testVolumeCreateFromDestroyedVolSnapshot(tt *testing.T) { j := f.Fly("vol ls -a %s --all --json", appName) j.StdOutJSON(&ls) - - assert.Equal(t, "pending_destroy", ls[0].State) assert.Len(t, ls, 1) - }, 5*time.Minute, 10*time.Second, "volume %s never made it to pending_destroy state", vol.ID) + assert.Contains(t, []string{"scheduling_destroy", "pending_destroy", "destroying", "destroyed"}, ls[0].State) + }, 5*time.Minute, 10*time.Second, "volume %s never made it to a destroy state", vol.ID) ls := f.Fly("vol snapshot ls --json %s", vol.ID) var snapshots2 []*fly.VolumeSnapshot diff --git a/test/preflight/fly_wireguard_test.go b/test/preflight/fly_wireguard_test.go new file mode 100644 index 0000000000..b348b40759 --- /dev/null +++ b/test/preflight/fly_wireguard_test.go @@ -0,0 +1,60 @@ +//go:build integration +// +build integration + +package preflight + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/superfly/flyctl/test/testlib" +) + +// cleanupDigOutput removes quotes and spaces to join TXT record parts properly +func cleanupDigOutput(s string) string { + return strings.ReplaceAll(strings.ReplaceAll(s, `"`, ""), " ", "") +} + +func TestFlyWireguardCreate(t *testing.T) { + f := testlib.NewTestEnvFromEnv(t) + + appName := f.CreateRandomAppName() + f.WriteFile("Dockerfile", `FROM alpine:3.22 +RUN apk add --no-cache bind-tools +CMD ["sleep", "infinity"] +`) + f.Fly("launch --org %s --name %s --region %s --ha=false --now", f.OrgSlug(), appName, f.PrimaryRegion()) + + // Generate a valid peer name (letters, numbers, and dashes only) + peerName := fmt.Sprintf("test-peer-%s", f.ID()) + path := filepath.Join(t.TempDir(), "wg.conf") + f.Fly("wg create %s %s %s %s", f.OrgSlug(), f.PrimaryRegion(), peerName, path) + defer f.Fly("wg remove %s %s", f.OrgSlug(), peerName) + + t.Run("Make sure the config file is created", func(t *testing.T) { + // Verify the generated wg.conf file exists and has content + data, err := os.ReadFile(path) + require.NoError(t, err, "WireGuard config file should exist at: %s", path) + require.NotEmpty(t, data, "WireGuard config file should not be empty") + }) + t.Run("Check the peer is visible from the organization", func(t *testing.T) { + machines := f.MachinesList(appName) + require.NotEmpty(t, machines, "Should have at least one machine") + machineID := machines[0].ID + + // The backend is 
eventually consistent. The peer may not be immediately visible. + assert.EventuallyWithT(t, func(t *assert.CollectT) { + result := f.Fly("machine exec -a %s %s 'dig +short aaaa @fdaa::3 %s._peer.internal'", appName, machineID, peerName) + assert.Contains(t, result.StdOutString(), "fdaa:") + + result = f.Fly("machine exec -a %s %s 'dig +noall +answer txt @fdaa::3 _peer.internal'", appName, machineID) + assert.Contains(t, cleanupDigOutput(result.StdOutString()), peerName) + }, 10*time.Second, time.Second) + }) +} diff --git a/test/testlib/test_env.go b/test/testlib/test_env.go index bb75e30c0e..c9455ebad5 100644 --- a/test/testlib/test_env.go +++ b/test/testlib/test_env.go @@ -188,7 +188,8 @@ type testingTWrapper interface { TempDir() string } -// Fly runs a flyctl the result +// Fly runs flyctl and returns the result. +// It fails the test if the command exits with a non-zero status. func (f *FlyctlTestEnv) Fly(flyctlCmd string, vals ...interface{}) *FlyctlResult { if f.VMSize != "" { if strings.HasPrefix(flyctlCmd, "machine run ") || strings.HasPrefix(flyctlCmd, "launch ") { @@ -199,20 +200,12 @@ func (f *FlyctlTestEnv) Fly(flyctlCmd string, vals ...interface{}) *FlyctlResult return f.FlyContextAndConfig(context.TODO(), FlyCmdConfig{}, flyctlCmd, vals...) } -// FlyAllowExitFailure runs a flyctl command and returns the result, but does not fail the test if the command exits with a non-zero status +// FlyAllowExitFailure runs flyctl command and returns the result. +// It does not fail the test even if the command exits with a non-zero status func (f *FlyctlTestEnv) FlyAllowExitFailure(flyctlCmd string, vals ...interface{}) *FlyctlResult { return f.FlyContextAndConfig(context.TODO(), FlyCmdConfig{NoAssertSuccessfulExit: true}, flyctlCmd, vals...) } -// FlyC runs a flyctl command with a context and returns the result -func (f *FlyctlTestEnv) FlyC(ctx context.Context, flyctlCmd string, vals ...interface{}) *FlyctlResult { - return f.FlyContextAndConfig(ctx, FlyCmdConfig{}, flyctlCmd, vals...) -} - -// func (f *FlyctlTestEnv) FlyAllowExitFailure(ctx context.Context, flyctlCmd string, vals ...interface{}) *FlyctlResult { -// return f.FlyContextAndConfig(ctx, FlyCmdConfig{NoAssertSuccessfulExit: true}, flyctlCmd, vals...) -// } - type FlyCmdConfig struct { NoAssertSuccessfulExit bool } diff --git a/tools/distribute/bundle/meta.go b/tools/distribute/bundle/meta.go index 04e7266f63..26a9146e5f 100644 --- a/tools/distribute/bundle/meta.go +++ b/tools/distribute/bundle/meta.go @@ -52,7 +52,7 @@ func (m Meta) Validate() error { } if m.Release.Version == nil { - return errors.New("missing version number. make sure there's a verison in release.json") + return errors.New("missing version number. 
make sure there's a version in release.json") } if len(m.Assets) == 0 { diff --git a/tools/distribute/flypkgs/errors.go b/tools/distribute/flypkgs/errors.go index 1eb68cb848..3e0d840e64 100644 --- a/tools/distribute/flypkgs/errors.go +++ b/tools/distribute/flypkgs/errors.go @@ -14,9 +14,10 @@ type ErrorResponse struct { func (e ErrorResponse) Error() string { var sb strings.Builder - sb.WriteString(fmt.Sprintf("API error: %d\n", e.Code)) + + fmt.Fprintf(&sb, "API error: %d\n", e.Code) for _, msg := range e.Messages { - sb.WriteString(fmt.Sprintf(" - %s\n", msg)) + fmt.Fprintf(&sb, " - %s\n", msg) } return sb.String() diff --git a/wg/ws.go b/wg/ws.go index bc98497706..2fc8d66ed0 100644 --- a/wg/ws.go +++ b/wg/ws.go @@ -30,21 +30,6 @@ func ConnectWS(ctx context.Context, state *WireGuardState) (*Tunnel, error) { return t, err } -func write(w io.Writer, buf []byte) error { - var lbuf [4]byte - binary.BigEndian.PutUint32(lbuf[:], uint32(len(buf))) - if _, err := w.Write(lbuf[:]); err != nil { - return err - } - - if len(buf) == 0 { - return nil - } - - _, err := w.Write(buf) - return err -} - func read(r io.Reader, rbuf []byte) ([]byte, error) { var lbuf [4]byte if _, err := io.ReadFull(r, lbuf[:]); err != nil { @@ -189,7 +174,8 @@ func (wswg *WsWgProxy) wsWrite(c net.Conn, b []byte) error { wswg.wrlock.Lock() defer wswg.wrlock.Unlock() - return write(c, b) + _, err := c.Write(b) + return err } func (wswg *WsWgProxy) ws2wg(ctx context.Context) { @@ -218,11 +204,11 @@ func (wswg *WsWgProxy) ws2wg(ctx context.Context) { } func (wswg *WsWgProxy) wg2ws(ctx context.Context) { - buf := make([]byte, 2000) + var buf [2000]byte for ctx.Err() == nil { wswg.plugConn.SetReadDeadline(time.Now().Add(5 * time.Second)) - n, a, err := wswg.plugConn.ReadFrom(buf) + n, a, err := wswg.plugConn.ReadFrom(buf[4:]) if err != nil { if isTimeout(err) { continue @@ -231,13 +217,14 @@ func (wswg *WsWgProxy) wg2ws(ctx context.Context) { // resetting won't do anything here log.Printf("error reading from udp plugboard: %s", err) } + binary.BigEndian.PutUint32(buf[:], uint32(n)) wswg.lock.Lock() wswg.lastPlugAddr = a c := wswg.wsConn wswg.lock.Unlock() - if err = wswg.wsWrite(c, buf[:n]); err != nil { + if err = wswg.wsWrite(c, buf[:n+4]); err != nil { wswg.resetConn(c, err) } @@ -303,6 +290,8 @@ func websocketConnect(ctx context.Context, endpoint string) (int, error) { go wswg.ws2wg(ctx) go wswg.wg2ws(ctx) + zeroLenMsg := make([]byte, 4) + for ctx.Err() == nil { time.Sleep(1 * time.Second) @@ -311,7 +300,7 @@ func websocketConnect(ctx context.Context, endpoint string) (int, error) { c := wswg.wsConn wswg.lock.RUnlock() - if err := wswg.wsWrite(c, nil); err != nil { + if err := wswg.wsWrite(c, zeroLenMsg); err != nil { wswg.resetConn(c, err) } } From a9b39960e02ebed464853e24a5cd8d4cbfba876c Mon Sep 17 00:00:00 2001 From: Lubien Date: Thu, 9 Oct 2025 18:10:35 -0300 Subject: [PATCH 100/104] deploy fix: use image ref directly if image resolver fails (#4596) Co-authored-by: Will Jordan --- internal/command/deploy/deploy_build.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/command/deploy/deploy_build.go b/internal/command/deploy/deploy_build.go index a8a8192249..297300b4cf 100644 --- a/internal/command/deploy/deploy_build.go +++ b/internal/command/deploy/deploy_build.go @@ -143,7 +143,11 @@ func determineImage(ctx context.Context, appConfig *appconfig.Config, useWG, rec img, err = resolver.ResolveReference(ctx, io, opts) if err != nil { tracing.RecordError(span, err, "failed to resolve reference 
for prebuilt docker image") - return + img = &imgsrc.DeploymentImage{ + ID: imageRef, + Tag: imageRef, + } + err = nil } span.AddEvent("using pre-built docker image") From 610c77332da14ac3aa8e35711d9e9b1584c08471 Mon Sep 17 00:00:00 2001 From: Will Jordan Date: Thu, 16 Oct 2025 18:47:32 -0700 Subject: [PATCH 101/104] Pass builder ID through fly deploy --image build strategy (#4601) * Pass builder ID through fly deploy --image build strategy Pull the `fly_builder_id` image manifest annotation for machine deployment. This allows deploys using separate `fly deploy --build-only --push` / `fly deploy --image` commands to correctly reference images in the local registry-mirror. * fly-go v0.1.58 --- go.mod | 4 ++-- go.sum | 4 ++-- internal/build/imgsrc/remote_image_resolver.go | 7 +++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index ce6998deb6..87841d35d1 100644 --- a/go.mod +++ b/go.mod @@ -60,7 +60,6 @@ require ( github.com/novln/docker-parser v1.0.0 github.com/oklog/ulid/v2 v2.1.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/opencontainers/image-spec v1.1.0 github.com/pelletier/go-toml/v2 v2.2.4 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.9 @@ -75,7 +74,7 @@ require ( github.com/spf13/pflag v1.0.9 github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.11.1 - github.com/superfly/fly-go v0.1.57 + github.com/superfly/fly-go v0.1.58 github.com/superfly/graphql v0.2.6 github.com/superfly/lfsc-go v0.1.1 github.com/superfly/macaroon v0.3.0 @@ -236,6 +235,7 @@ require ( github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 github.com/opencontainers/selinux v1.11.1 // indirect github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect diff --git a/go.sum b/go.sum index f61f1f4c3c..670c546a42 100644 --- a/go.sum +++ b/go.sum @@ -635,8 +635,8 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/superfly/fly-go v0.1.57 h1:d3p8i6YKqXKSJSxxNW7yWrRdCSBqXJkwduJ94ffB1Hs= -github.com/superfly/fly-go v0.1.57/go.mod h1:wpq4XNor10w9KurA15CBYRnhtT2mnemAXYHuqkhp2vI= +github.com/superfly/fly-go v0.1.58 h1:3e47bP5s+NeNNveo8InotMqWnR+OalKky2fitbGCftU= +github.com/superfly/fly-go v0.1.58/go.mod h1:wpq4XNor10w9KurA15CBYRnhtT2mnemAXYHuqkhp2vI= github.com/superfly/graphql v0.2.6 h1:zppbodNerWecoXEdjkhrqaNaSjGqobhXNlViHFuZzb4= github.com/superfly/graphql v0.2.6/go.mod h1:CVfDl31srm8HnJ9udwLu6hFNUW/P6GUM2dKcG1YQ8jc= github.com/superfly/lfsc-go v0.1.1 h1:dGjLgt81D09cG+aR9lJZIdmonjZSR5zYCi7s54+ZU2Q= diff --git a/internal/build/imgsrc/remote_image_resolver.go b/internal/build/imgsrc/remote_image_resolver.go index 3e7cf11117..6dd289844d 100644 --- a/internal/build/imgsrc/remote_image_resolver.go +++ b/internal/build/imgsrc/remote_image_resolver.go @@ -56,6 +56,13 @@ func (s *remoteImageResolver) Run(ctx context.Context, _ *dockerClientFactory, s Size: int64(size), } + if img.Manifest != nil && img.Manifest.Annotations != nil { + if id, ok := img.Manifest.Annotations["fly_builder_id"]; ok { + di.BuilderID = id + build.BuilderMeta.RemoteMachineId = id + } + } + 
span.SetAttributes(di.ToSpanAttributes()...) return di, "", nil From f87a793f3208956e8939829ac5f85ac56a9f88b0 Mon Sep 17 00:00:00 2001 From: Lubien Date: Fri, 17 Oct 2025 08:00:45 -0300 Subject: [PATCH 102/104] Deployer experiment early git push (#4610) Early push files Co-authored-by: Will Jordan --- deploy.rb | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deploy.rb b/deploy.rb index b89947f216..8dc19d6407 100755 --- a/deploy.rb +++ b/deploy.rb @@ -328,6 +328,17 @@ APP_NAME = DEPLOY_APP_NAME || fly_config["app"] +if CAN_CREATE_AND_PUSH_BRANCH + in_step Step::CREATE_AND_PUSH_BRANCH do + exec_capture("git checkout -b #{FLYIO_BRANCH_NAME}") + exec_capture("git config user.name \"Fly.io\"") + exec_capture("git config user.email \"noreply@fly.io\"") + exec_capture("git add .") + exec_capture("git commit -m \"New files from Fly.io Launch\" || echo \"No changes to commit\"") + exec_capture("git push -f origin #{FLYIO_BRANCH_NAME}") + end +end + image_ref = if !DEPLOY_IMAGE_REF.nil? DEPLOY_IMAGE_REF else @@ -457,17 +468,6 @@ end end -if CAN_CREATE_AND_PUSH_BRANCH - in_step Step::CREATE_AND_PUSH_BRANCH do - exec_capture("git checkout -b #{FLYIO_BRANCH_NAME}") - exec_capture("git config user.name \"Fly.io\"") - exec_capture("git config user.email \"noreply@fly.io\"") - exec_capture("git add .") - exec_capture("git commit -m \"New files from Fly.io Launch\" || echo \"No changes to commit\"") - exec_capture("git push -f origin #{FLYIO_BRANCH_NAME}") - end -end - if !get_env("DEPLOYER_CLEANUP_BEFORE_EXIT").nil? if GIT_REPO `git clean -f -x -d` From 66e95e7d1fac634920ba59c4d2802dc4cdafd31a Mon Sep 17 00:00:00 2001 From: Lubien Date: Wed, 29 Oct 2025 11:48:05 -0300 Subject: [PATCH 103/104] require uiex in all plan commands (#4625) --- internal/command/launch/plan_commands.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/command/launch/plan_commands.go b/internal/command/launch/plan_commands.go index afae7b28a0..b1bfb7ced7 100644 --- a/internal/command/launch/plan_commands.go +++ b/internal/command/launch/plan_commands.go @@ -15,7 +15,7 @@ import ( func NewPlan() *cobra.Command { const desc = `[experimental] Granular subcommands for creating and configuring apps` - cmd := command.New("plan", desc, desc, nil, command.RequireSession, command.LoadAppConfigIfPresent) + cmd := command.New("plan", desc, desc, nil, command.RequireSession, command.RequireUiex, command.LoadAppConfigIfPresent) cmd.Args = cobra.NoArgs cmd.AddCommand(newPropose()) @@ -33,7 +33,7 @@ func NewPlan() *cobra.Command { func newPropose() *cobra.Command { const desc = "[experimental] propose a plan based on scanning the source code or Dockerfile" - cmd := command.New("propose", desc, desc, runPropose, command.LoadAppConfigIfPresent) + cmd := command.New("propose", desc, desc, runPropose, command.LoadAppConfigIfPresent, command.RequireUiex) flag.Add(cmd, flag.Region(), @@ -92,7 +92,7 @@ func newPropose() *cobra.Command { func newCreate() *cobra.Command { const desc = "[experimental] create application" - cmd := command.New("create", desc, desc, runCreate) + cmd := command.New("create", desc, desc, runCreate, command.RequireUiex) cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, @@ -116,7 +116,7 @@ func newCreate() *cobra.Command { func newPostgres() *cobra.Command { const desc = "[experimental] create postgres database" - cmd := command.New("postgres", desc, desc, runPostgres) + cmd := command.New("postgres", desc, desc, runPostgres, command.RequireUiex) 
cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, @@ -133,7 +133,7 @@ func newPostgres() *cobra.Command { func newRedis() *cobra.Command { const desc = "[experimental] create redis database" - cmd := command.New("redis", desc, desc, runRedis) + cmd := command.New("redis", desc, desc, runRedis, command.RequireUiex) cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, @@ -150,7 +150,7 @@ func newRedis() *cobra.Command { func newTigris() *cobra.Command { const desc = "[experimental] create tigris database" - cmd := command.New("tigris", desc, desc, runTigris) + cmd := command.New("tigris", desc, desc, runTigris, command.RequireUiex) cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, @@ -167,7 +167,7 @@ func newTigris() *cobra.Command { func newGenerate() *cobra.Command { const desc = "[experimental] generate Dockerfile and other configuration files based on the plan" - cmd := command.New("generate", desc, desc, runGenerate) + cmd := command.New("generate", desc, desc, runGenerate, command.RequireUiex) cmd.Args = cobra.ExactArgs(1) flag.Add(cmd, From 9767ed62785c2bdbfea127f971965c53530c4b51 Mon Sep 17 00:00:00 2001 From: Lubien Date: Fri, 31 Oct 2025 15:09:38 -0300 Subject: [PATCH 104/104] files from diff (#4626) * files from diff * typo * fix I guess * accept staged files --- deploy.rb | 51 ++++++++++++++++++++++++++++++++++++++++++++++++ deploy/common.rb | 1 + 2 files changed, 52 insertions(+) diff --git a/deploy.rb b/deploy.rb index 8dc19d6407..a201c4565f 100755 --- a/deploy.rb +++ b/deploy.rb @@ -115,6 +115,39 @@ Dir.chdir(DEPLOYER_SOURCE_CWD) end + +# Check if staged-files directory exists and has files +if Dir.exist?('/tmp/staged-files/') && !Dir.empty?('/tmp/staged-files/') + in_step Step::GIT_PULL do + def copy_files_preserving_structure(source_dir, target_dir) + Dir.glob(File.join(source_dir, '**', '*'), File::FNM_DOTMATCH).each do |file| + # Skip . and .. directories + next if File.basename(file) == '.' || File.basename(file) == '..' + + # Get the relative path from source_dir + relative_path = file.sub(source_dir, '') + target_path = File.join(target_dir, relative_path) + + if File.directory?(file) + # Create directory if it doesn't exist + FileUtils.mkdir_p(target_path) unless Dir.exist?(target_path) + else + # Create parent directories if they don't exist + FileUtils.mkdir_p(File.dirname(target_path)) unless Dir.exist?(File.dirname(target_path)) + + # Copy the file, overwriting if it exists + FileUtils.cp(file, target_path, preserve: true) + info("Copied #{file} to #{target_path}") + end + end + end + + # Copy files from staged-files to current directory + copy_files_preserving_structure('/tmp/staged-files/', Dir.pwd) + info("Finished copying staged files") + end +end + if !DEPLOYER_FLY_CONFIG_PATH.nil? && !File.exists?(DEPLOYER_FLY_CONFIG_PATH) event :error, { type: :validation, message: "Config file #{DEPLOYER_FLY_CONFIG_PATH} does not exist" } exit 1 @@ -315,6 +348,24 @@ exec_capture("git add -A", log: false) diff = exec_capture("git diff --cached", log: false) artifact Artifact::DIFF, { output: diff } + + files = [] + begin + diff_files = diff.scan(%r{diff --git a/(.*?) b/}) + diff_files.each do |match| + file_path = match[0] + if file_path && !file_path.empty? 
+ # Check if file exists and is readable before trying to read it + files << { relative_path: file_path, content: File.read(file_path) } if File.exist?(file_path) && File.readable?(file_path) + end + rescue StandardError => e + info("Error parsing diff file: #{e.message}") + end + rescue StandardError => e + error(e.message) + end + + artifact Artifact::FILES, { output: files } end end end diff --git a/deploy/common.rb b/deploy/common.rb index c854a1f9a6..ab943f6dd8 100644 --- a/deploy/common.rb +++ b/deploy/common.rb @@ -39,6 +39,7 @@ module Artifact MANIFEST = :manifest SESSION = :session DIFF = :diff + FILES = :files FLY_POSTGRES = :fly_postgres SUPABASE_POSTGRES = :supabase_postgres UPSTASH_REDIS = :upstash_redis
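
Editor's note: the final patch above has deploy.rb parse its own `git diff --cached` output to report which files Launch generated. Below is a minimal standalone sketch of that technique, hedged as an illustration only: `collect_changed_files` is an invented name, the `^`-anchored regex and the `Open3.capture2` invocation are assumptions that mirror (but are not copied from) the patch, and error handling is simplified.

#!/usr/bin/ruby
# Sketch: extract changed file paths from `git diff` output and collect the
# current contents of each file, similar to what the patched deploy.rb does.

require 'open3'

# Returns an array of { relative_path:, content: } hashes for files named in
# the diff text. Deleted or unreadable files are skipped rather than raising.
def collect_changed_files(diff_text)
  files = []
  # Each changed file appears under a "diff --git a/<path> b/<path>" header.
  diff_text.scan(%r{^diff --git a/(.*?) b/}).each do |(path)|
    next if path.nil? || path.empty?
    next unless File.exist?(path) && File.readable?(path)
    files << { relative_path: path, content: File.read(path) }
  end
  files
end

if $PROGRAM_NAME == __FILE__
  # Assumes it is run from the repository root with changes already staged.
  diff, _status = Open3.capture2("git", "diff", "--cached")
  collect_changed_files(diff).each { |f| puts f[:relative_path] }
end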