diff --git a/cmd/envbuilder/main.go b/cmd/envbuilder/main.go
index fcbf26d0..1910568e 100644
--- a/cmd/envbuilder/main.go
+++ b/cmd/envbuilder/main.go
@@ -36,6 +36,7 @@ func envbuilderCmd() serpent.Command {
 		Use:     "envbuilder",
 		Options: o.CLI(),
 		Handler: func(inv *serpent.Invocation) error {
+			o.SetDefaults()
 			o.Logger = log.New(os.Stderr, o.Verbose)
 			if o.CoderAgentURL != "" {
 				if o.CoderAgentToken == "" {
@@ -63,6 +64,20 @@ func envbuilderCmd() serpent.Command {
 				}
 			}
 
+			if o.GetCachedImage {
+				img, err := envbuilder.RunCacheProbe(inv.Context(), o)
+				if err != nil {
+					o.Logger(log.LevelError, "error: %s", err)
+					return err
+				}
+				digest, err := img.Digest()
+				if err != nil {
+					return fmt.Errorf("get cached image digest: %w", err)
+				}
+				_, _ = fmt.Fprintf(inv.Stdout, "ENVBUILDER_CACHED_IMAGE=%s@%s\n", o.CacheRepo, digest.String())
+				return nil
+			}
+
 			err := envbuilder.Run(inv.Context(), o)
 			if err != nil {
 				o.Logger(log.LevelError, "error: %s", err)
diff --git a/envbuilder.go b/envbuilder.go
index 98adab03..dc2ead8e 100644
--- a/envbuilder.go
+++ b/envbuilder.go
@@ -21,6 +21,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"syscall"
 	"time"
 
@@ -59,7 +60,10 @@ type DockerConfig configfile.ConfigFile
 // Filesystem is the filesystem to use for all operations.
 // Defaults to the host filesystem.
 func Run(ctx context.Context, opts options.Options) error {
-	opts.SetDefaults()
+	defer options.UnsetEnv()
+	if opts.GetCachedImage {
+		return fmt.Errorf("developer error: use RunCacheProbe instead")
+	}
 
 	if opts.CacheRepo == "" && opts.PushImage {
 		return fmt.Errorf("--cache-repo must be set when using --push-image")
@@ -89,41 +93,20 @@ func Run(ctx context.Context, opts options.Options) error {
 	opts.Logger(log.LevelInfo, "%s - Build development environments from repositories in a container", newColor(color.Bold).Sprintf("envbuilder"))
 
 	var caBundle []byte
-	if opts.SSLCertBase64 != "" {
-		certPool, err := x509.SystemCertPool()
-		if err != nil {
-			return xerrors.Errorf("get global system cert pool: %w", err)
-		}
-		data, err := base64.StdEncoding.DecodeString(opts.SSLCertBase64)
-		if err != nil {
-			return xerrors.Errorf("base64 decode ssl cert: %w", err)
-		}
-		ok := certPool.AppendCertsFromPEM(data)
-		if !ok {
-			return xerrors.Errorf("failed to append the ssl cert to the global pool: %s", data)
-		}
-		caBundle = data
+	caBundle, err := initCABundle(opts.SSLCertBase64)
+	if err != nil {
+		return err
 	}
 
-	if opts.DockerConfigBase64 != "" {
-		decoded, err := base64.StdEncoding.DecodeString(opts.DockerConfigBase64)
-		if err != nil {
-			return fmt.Errorf("decode docker config: %w", err)
-		}
-		var configFile DockerConfig
-		decoded, err = hujson.Standardize(decoded)
-		if err != nil {
-			return fmt.Errorf("humanize json for docker config: %w", err)
-		}
-		err = json.Unmarshal(decoded, &configFile)
-		if err != nil {
-			return fmt.Errorf("parse docker config: %w", err)
-		}
-		err = os.WriteFile(filepath.Join(constants.MagicDir, "config.json"), decoded, 0o644)
-		if err != nil {
-			return fmt.Errorf("write docker config: %w", err)
-		}
+	cleanupDockerConfigJSON, err := initDockerConfigJSON(opts.DockerConfigBase64)
+	if err != nil {
+		return err
 	}
+	defer func() {
+		if err := cleanupDockerConfigJSON(); err != nil {
+			opts.Logger(log.LevelError, "failed to cleanup docker config JSON: %s", err)
+		}
+	}() // best effort
 
 	var fallbackErr error
 	var cloned bool
@@ -301,46 +284,16 @@ func Run(ctx context.Context, opts options.Options) error {
 		}
 	})
 
-	var closeAfterBuild func()
-
 	// Allows quick testing of layer caching using a local directory!
 	if opts.LayerCacheDir != "" {
-		cfg := &configuration.Configuration{
-			Storage: configuration.Storage{
-				"filesystem": configuration.Parameters{
-					"rootdirectory": opts.LayerCacheDir,
-				},
-			},
+		if opts.CacheRepo != "" {
+			opts.Logger(log.LevelWarn, "Overriding cache repo with local registry...")
 		}
-		cfg.Log.Level = "error"
-
-		// Spawn an in-memory registry to cache built layers...
-		registry := handlers.NewApp(ctx, cfg)
-
-		listener, err := net.Listen("tcp", "127.0.0.1:0")
+		localRegistry, closeLocalRegistry, err := serveLocalRegistry(ctx, opts.Logger, opts.LayerCacheDir)
 		if err != nil {
 			return err
 		}
-		tcpAddr, ok := listener.Addr().(*net.TCPAddr)
-		if !ok {
-			return fmt.Errorf("listener addr was of wrong type: %T", listener.Addr())
-		}
-		srv := &http.Server{
-			Handler: registry,
-		}
-		go func() {
-			err := srv.Serve(listener)
-			if err != nil && !errors.Is(err, http.ErrServerClosed) {
-				opts.Logger(log.LevelError, "Failed to serve registry: %s", err.Error())
-			}
-		}()
-		closeAfterBuild = func() {
-			_ = srv.Close()
-			_ = listener.Close()
-		}
-		if opts.CacheRepo != "" {
-			opts.Logger(log.LevelWarn, "Overriding cache repo with local registry...")
-		}
-		opts.CacheRepo = fmt.Sprintf("localhost:%d/local/cache", tcpAddr.Port)
+		defer closeLocalRegistry()
+		opts.CacheRepo = localRegistry
 	}
 
 	// IgnorePaths in the Kaniko opts doesn't properly ignore paths.
@@ -404,6 +357,10 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 	}
 
 	skippedRebuild := false
+	stdoutWriter, closeStdout := log.Writer(opts.Logger)
+	defer closeStdout()
+	stderrWriter, closeStderr := log.Writer(opts.Logger)
+	defer closeStderr()
 	build := func() (v1.Image, error) {
 		_, err := opts.Filesystem.Stat(constants.MagicFile)
 		if err == nil && opts.SkipRebuild {
@@ -432,25 +389,6 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 		if err := maybeDeleteFilesystem(opts.Logger, opts.ForceSafe); err != nil {
 			return nil, fmt.Errorf("delete filesystem: %w", err)
 		}
-
-		stdoutReader, stdoutWriter := io.Pipe()
-		stderrReader, stderrWriter := io.Pipe()
-		defer stdoutReader.Close()
-		defer stdoutWriter.Close()
-		defer stderrReader.Close()
-		defer stderrWriter.Close()
-		go func() {
-			scanner := bufio.NewScanner(stdoutReader)
-			for scanner.Scan() {
-				opts.Logger(log.LevelInfo, "%s", scanner.Text())
-			}
-		}()
-		go func() {
-			scanner := bufio.NewScanner(stderrReader)
-			for scanner.Scan() {
-				opts.Logger(log.LevelInfo, "%s", scanner.Text())
-			}
-		}()
 		cacheTTL := time.Hour * 24 * 7
 		if opts.CacheTTLDays != 0 {
 			cacheTTL = time.Hour * 24 * time.Duration(opts.CacheTTLDays)
@@ -483,7 +421,6 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 			// https://github.com/klauspost/compress/blob/67a538e2b4df11f8ec7139388838a13bce84b5d5/zstd/encoder_options.go#L188
 			CompressionLevel: 3,
 			CacheOptions: config.CacheOptions{
-				// Cache for a week by default!
 				CacheTTL: cacheTTL,
 				CacheDir: opts.BaseImageCacheDir,
 			},
@@ -509,21 +446,6 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 			Reproducible: opts.PushImage,
 		}
 
-		if opts.GetCachedImage {
-			endStage := startStage("🏗️ Checking for cached image...")
-			image, err := executor.DoCacheProbe(kOpts)
-			if err != nil {
-				return nil, xerrors.Errorf("get cached image: %w", err)
-			}
-			digest, err := image.Digest()
-			if err != nil {
-				return nil, xerrors.Errorf("get cached image digest: %w", err)
-			}
-			endStage("🏗️ Found cached image!")
-			_, _ = fmt.Fprintf(os.Stdout, "ENVBUILDER_CACHED_IMAGE=%s@%s\n", kOpts.CacheRepo, digest.String())
-			os.Exit(0)
-		}
-
 		endStage := startStage("🏗️ Building image...")
 		image, err := executor.DoBuild(kOpts)
 		if err != nil {
@@ -578,10 +500,6 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 		return fmt.Errorf("build with kaniko: %w", err)
 	}
 
-	if closeAfterBuild != nil {
-		closeAfterBuild()
-	}
-
 	if err := restoreMounts(); err != nil {
 		return fmt.Errorf("restore mounts: %w", err)
 	}
@@ -642,16 +560,8 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 	options.UnsetEnv()
 
 	// Remove the Docker config secret file!
-	if opts.DockerConfigBase64 != "" {
-		c := filepath.Join(constants.MagicDir, "config.json")
-		err = os.Remove(c)
-		if err != nil {
-			if !errors.Is(err, fs.ErrNotExist) {
-				return fmt.Errorf("remove docker config: %w", err)
-			} else {
-				fmt.Fprintln(os.Stderr, "failed to remove the Docker config secret file: %w", c)
-			}
-		}
+	if err := cleanupDockerConfigJSON(); err != nil {
+		return err
 	}
 
 	environ, err := os.ReadFile("/etc/environment")
@@ -902,6 +812,327 @@ ENTRYPOINT [%q]`, exePath, exePath, exePath)
 	return nil
 }
 
+// RunCacheProbe performs a 'dry-run' build of the image and checks that
+// all of the resulting layers are present in options.CacheRepo.
+func RunCacheProbe(ctx context.Context, opts options.Options) (v1.Image, error) { + defer options.UnsetEnv() + if !opts.GetCachedImage { + return nil, fmt.Errorf("developer error: RunCacheProbe must be run with --get-cached-image") + } + if opts.CacheRepo == "" { + return nil, fmt.Errorf("--cache-repo must be set when using --get-cached-image") + } + + stageNumber := 0 + startStage := func(format string, args ...any) func(format string, args ...any) { + now := time.Now() + stageNumber++ + stageNum := stageNumber + opts.Logger(log.LevelInfo, "#%d: %s", stageNum, fmt.Sprintf(format, args...)) + + return func(format string, args ...any) { + opts.Logger(log.LevelInfo, "#%d: %s [%s]", stageNum, fmt.Sprintf(format, args...), time.Since(now)) + } + } + + opts.Logger(log.LevelInfo, "%s - Build development environments from repositories in a container", newColor(color.Bold).Sprintf("envbuilder")) + + caBundle, err := initCABundle(opts.SSLCertBase64) + if err != nil { + return nil, err + } + + cleanupDockerConfigJSON, err := initDockerConfigJSON(opts.DockerConfigBase64) + if err != nil { + return nil, err + } + defer func() { + if err := cleanupDockerConfigJSON(); err != nil { + opts.Logger(log.LevelError, "failed to cleanup docker config JSON: %w", err) + } + }() // best effort + + var fallbackErr error + var cloned bool + if opts.GitURL != "" { + endStage := startStage("📦 Cloning %s to %s...", + newColor(color.FgCyan).Sprintf(opts.GitURL), + newColor(color.FgCyan).Sprintf(opts.WorkspaceFolder), + ) + + reader, writer := io.Pipe() + defer reader.Close() + defer writer.Close() + go func() { + data := make([]byte, 4096) + for { + read, err := reader.Read(data) + if err != nil { + return + } + content := data[:read] + for _, line := range strings.Split(string(content), "\r") { + if line == "" { + continue + } + opts.Logger(log.LevelInfo, "#1: %s", strings.TrimSpace(line)) + } + } + }() + + cloneOpts := git.CloneRepoOptions{ + Path: opts.WorkspaceFolder, + Storage: opts.Filesystem, + Insecure: opts.Insecure, + Progress: writer, + SingleBranch: opts.GitCloneSingleBranch, + Depth: int(opts.GitCloneDepth), + CABundle: caBundle, + } + + cloneOpts.RepoAuth = git.SetupRepoAuth(&opts) + if opts.GitHTTPProxyURL != "" { + cloneOpts.ProxyOptions = transport.ProxyOptions{ + URL: opts.GitHTTPProxyURL, + } + } + cloneOpts.RepoURL = opts.GitURL + + cloned, fallbackErr = git.CloneRepo(ctx, cloneOpts) + if fallbackErr == nil { + if cloned { + endStage("📦 Cloned repository!") + } else { + endStage("📦 The repository already exists!") + } + } else { + opts.Logger(log.LevelError, "Failed to clone repository: %s", fallbackErr.Error()) + opts.Logger(log.LevelError, "Falling back to the default image...") + } + } + + defaultBuildParams := func() (*devcontainer.Compiled, error) { + dockerfile := filepath.Join(constants.MagicDir, "Dockerfile") + file, err := opts.Filesystem.OpenFile(dockerfile, os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return nil, err + } + defer file.Close() + if opts.FallbackImage == "" { + if fallbackErr != nil { + return nil, fmt.Errorf("%s: %w", fallbackErr.Error(), constants.ErrNoFallbackImage) + } + // We can't use errors.Join here because our tests + // don't support parsing a multiline error. 
+ return nil, constants.ErrNoFallbackImage + } + content := "FROM " + opts.FallbackImage + _, err = file.Write([]byte(content)) + if err != nil { + return nil, err + } + return &devcontainer.Compiled{ + DockerfilePath: dockerfile, + DockerfileContent: content, + BuildContext: constants.MagicDir, + }, nil + } + + var ( + buildParams *devcontainer.Compiled + devcontainerPath string + ) + if opts.DockerfilePath == "" { + // Only look for a devcontainer if a Dockerfile wasn't specified. + // devcontainer is a standard, so it's reasonable to be the default. + var devcontainerDir string + var err error + devcontainerPath, devcontainerDir, err = findDevcontainerJSON(opts) + if err != nil { + opts.Logger(log.LevelError, "Failed to locate devcontainer.json: %s", err.Error()) + opts.Logger(log.LevelError, "Falling back to the default image...") + } else { + // We know a devcontainer exists. + // Let's parse it and use it! + file, err := opts.Filesystem.Open(devcontainerPath) + if err != nil { + return nil, fmt.Errorf("open devcontainer.json: %w", err) + } + defer file.Close() + content, err := io.ReadAll(file) + if err != nil { + return nil, fmt.Errorf("read devcontainer.json: %w", err) + } + devContainer, err := devcontainer.Parse(content) + if err == nil { + var fallbackDockerfile string + if !devContainer.HasImage() && !devContainer.HasDockerfile() { + defaultParams, err := defaultBuildParams() + if err != nil { + return nil, fmt.Errorf("no Dockerfile or image found: %w", err) + } + opts.Logger(log.LevelInfo, "No Dockerfile or image specified; falling back to the default image...") + fallbackDockerfile = defaultParams.DockerfilePath + } + buildParams, err = devContainer.Compile(opts.Filesystem, devcontainerDir, constants.MagicDir, fallbackDockerfile, opts.WorkspaceFolder, false, os.LookupEnv) + if err != nil { + return nil, fmt.Errorf("compile devcontainer.json: %w", err) + } + } else { + opts.Logger(log.LevelError, "Failed to parse devcontainer.json: %s", err.Error()) + opts.Logger(log.LevelError, "Falling back to the default image...") + } + } + } else { + // If a Dockerfile was specified, we use that. + dockerfilePath := filepath.Join(opts.WorkspaceFolder, opts.DockerfilePath) + + // If the dockerfilePath is specified and deeper than the base of WorkspaceFolder AND the BuildContextPath is + // not defined, show a warning + dockerfileDir := filepath.Dir(dockerfilePath) + if dockerfileDir != filepath.Clean(opts.WorkspaceFolder) && opts.BuildContextPath == "" { + opts.Logger(log.LevelWarn, "given dockerfile %q is below %q and no custom build context has been defined", dockerfilePath, opts.WorkspaceFolder) + opts.Logger(log.LevelWarn, "\t-> set BUILD_CONTEXT_PATH to %q to fix", dockerfileDir) + } + + dockerfile, err := opts.Filesystem.Open(dockerfilePath) + if err == nil { + content, err := io.ReadAll(dockerfile) + if err != nil { + return nil, fmt.Errorf("read Dockerfile: %w", err) + } + buildParams = &devcontainer.Compiled{ + DockerfilePath: dockerfilePath, + DockerfileContent: string(content), + BuildContext: filepath.Join(opts.WorkspaceFolder, opts.BuildContextPath), + } + } + } + + // When probing the build cache, there is no fallback! 
+ if buildParams == nil { + return nil, fmt.Errorf("no Dockerfile or devcontainer.json found") + } + + HijackLogrus(func(entry *logrus.Entry) { + for _, line := range strings.Split(entry.Message, "\r") { + opts.Logger(log.LevelInfo, "#%d: %s", stageNumber, color.HiBlackString(line)) + } + }) + + if opts.LayerCacheDir != "" { + if opts.CacheRepo != "" { + opts.Logger(log.LevelWarn, "Overriding cache repo with local registry...") + } + localRegistry, closeLocalRegistry, err := serveLocalRegistry(ctx, opts.Logger, opts.LayerCacheDir) + if err != nil { + return nil, err + } + defer closeLocalRegistry() + opts.CacheRepo = localRegistry + } + + // IgnorePaths in the Kaniko opts doesn't properly ignore paths. + // So we add them to the default ignore list. See: + // https://github.com/GoogleContainerTools/kaniko/blob/63be4990ca5a60bdf06ddc4d10aa4eca0c0bc714/cmd/executor/cmd/root.go#L136 + ignorePaths := append([]string{ + constants.MagicDir, + opts.WorkspaceFolder, + // See: https://github.com/coder/envbuilder/issues/37 + "/etc/resolv.conf", + }, opts.IgnorePaths...) + + if opts.LayerCacheDir != "" { + ignorePaths = append(ignorePaths, opts.LayerCacheDir) + } + + for _, ignorePath := range ignorePaths { + util.AddToDefaultIgnoreList(util.IgnoreListEntry{ + Path: ignorePath, + PrefixMatchOnly: false, + AllowedPaths: nil, + }) + } + + stdoutWriter, closeStdout := log.Writer(opts.Logger) + defer closeStdout() + stderrWriter, closeStderr := log.Writer(opts.Logger) + defer closeStderr() + cacheTTL := time.Hour * 24 * 7 + if opts.CacheTTLDays != 0 { + cacheTTL = time.Hour * 24 * time.Duration(opts.CacheTTLDays) + } + + // At this point we have all the context, we can now build! + registryMirror := []string{} + if val, ok := os.LookupEnv("KANIKO_REGISTRY_MIRROR"); ok { + registryMirror = strings.Split(val, ";") + } + var destinations []string + if opts.CacheRepo != "" { + destinations = append(destinations, opts.CacheRepo) + } + kOpts := &config.KanikoOptions{ + // Boilerplate! + CustomPlatform: platforms.Format(platforms.Normalize(platforms.DefaultSpec())), + SnapshotMode: "redo", + RunV2: true, + RunStdout: stdoutWriter, + RunStderr: stderrWriter, + Destinations: destinations, + NoPush: !opts.PushImage || len(destinations) == 0, + CacheRunLayers: true, + CacheCopyLayers: true, + CompressedCaching: true, + Compression: config.ZStd, + // Maps to "default" level, ~100-300 MB/sec according to + // benchmarks in klauspost/compress README + // https://github.com/klauspost/compress/blob/67a538e2b4df11f8ec7139388838a13bce84b5d5/zstd/encoder_options.go#L188 + CompressionLevel: 3, + CacheOptions: config.CacheOptions{ + CacheTTL: cacheTTL, + CacheDir: opts.BaseImageCacheDir, + }, + ForceUnpack: true, + BuildArgs: buildParams.BuildArgs, + CacheRepo: opts.CacheRepo, + Cache: opts.CacheRepo != "" || opts.BaseImageCacheDir != "", + DockerfilePath: buildParams.DockerfilePath, + DockerfileContent: buildParams.DockerfileContent, + RegistryOptions: config.RegistryOptions{ + Insecure: opts.Insecure, + InsecurePull: opts.Insecure, + SkipTLSVerify: opts.Insecure, + // Enables registry mirror features in Kaniko, see more in link below + // https://github.com/GoogleContainerTools/kaniko?tab=readme-ov-file#flag---registry-mirror + // Related to PR #114 + // https://github.com/coder/envbuilder/pull/114 + RegistryMirrors: registryMirror, + }, + SrcContext: buildParams.BuildContext, + + // For cached image utilization, produce reproducible builds. 
+		Reproducible: opts.PushImage,
+	}
+
+	endStage := startStage("🏗️ Checking for cached image...")
+	image, err := executor.DoCacheProbe(kOpts)
+	if err != nil {
+		return nil, fmt.Errorf("get cached image: %w", err)
+	}
+	endStage("🏗️ Found cached image!")
+
+	// Sanitize the environment of any opts!
+	options.UnsetEnv()
+
+	// Remove the Docker config secret file!
+	if err := cleanupDockerConfigJSON(); err != nil {
+		return nil, err
+	}
+
+	return image, nil
+}
+
 type userInfo struct {
 	uid  int
 	gid  int
@@ -1123,17 +1354,128 @@ func maybeDeleteFilesystem(logger log.Func, force bool) error {
 func copyFile(src, dst string) error {
 	content, err := os.ReadFile(src)
 	if err != nil {
-		return xerrors.Errorf("read file failed: %w", err)
+		return fmt.Errorf("read file failed: %w", err)
 	}
 
 	err = os.MkdirAll(filepath.Dir(dst), 0o755)
 	if err != nil {
-		return xerrors.Errorf("mkdir all failed: %w", err)
+		return fmt.Errorf("mkdir all failed: %w", err)
 	}
 
 	err = os.WriteFile(dst, content, 0o644)
 	if err != nil {
-		return xerrors.Errorf("write file failed: %w", err)
+		return fmt.Errorf("write file failed: %w", err)
 	}
 	return nil
 }
+
+func initCABundle(sslCertBase64 string) ([]byte, error) {
+	if sslCertBase64 == "" {
+		return []byte{}, nil
+	}
+	certPool, err := x509.SystemCertPool()
+	if err != nil {
+		return nil, fmt.Errorf("get global system cert pool: %w", err)
+	}
+	data, err := base64.StdEncoding.DecodeString(sslCertBase64)
+	if err != nil {
+		return nil, fmt.Errorf("base64 decode ssl cert: %w", err)
+	}
+	ok := certPool.AppendCertsFromPEM(data)
+	if !ok {
+		return nil, fmt.Errorf("failed to append the ssl cert to the global pool: %s", data)
+	}
+	return data, nil
+}
+
+func initDockerConfigJSON(dockerConfigBase64 string) (func() error, error) {
+	var cleanupOnce sync.Once
+	noop := func() error { return nil }
+	if dockerConfigBase64 == "" {
+		return noop, nil
+	}
+	cfgPath := filepath.Join(constants.MagicDir, "config.json")
+	decoded, err := base64.StdEncoding.DecodeString(dockerConfigBase64)
+	if err != nil {
+		return noop, fmt.Errorf("decode docker config: %w", err)
+	}
+	var configFile DockerConfig
+	decoded, err = hujson.Standardize(decoded)
+	if err != nil {
+		return noop, fmt.Errorf("humanize json for docker config: %w", err)
+	}
+	err = json.Unmarshal(decoded, &configFile)
+	if err != nil {
+		return noop, fmt.Errorf("parse docker config: %w", err)
+	}
+	err = os.WriteFile(cfgPath, decoded, 0o644)
+	if err != nil {
+		return noop, fmt.Errorf("write docker config: %w", err)
+	}
+	cleanup := func() error {
+		var cleanupErr error
+		cleanupOnce.Do(func() {
+			// Remove the Docker config secret file!
+			if cleanupErr = os.Remove(cfgPath); cleanupErr != nil {
+				if errors.Is(cleanupErr, fs.ErrNotExist) {
+					// The config file is already gone; warn, but don't fail the cleanup.
+					_, _ = fmt.Fprintf(os.Stderr, "failed to remove the Docker config secret file: %s\n", cleanupErr)
+					cleanupErr = nil
+				} else {
+					cleanupErr = fmt.Errorf("remove docker config: %w", cleanupErr)
+				}
+			}
+		})
+		return cleanupErr
+	}
+	return cleanup, nil
+}
+
+// Allows quick testing of layer caching using a local directory!
+func serveLocalRegistry(ctx context.Context, logf log.Func, layerCacheDir string) (string, func(), error) {
+	noop := func() {}
+	if layerCacheDir == "" {
+		return "", noop, nil
+	}
+	cfg := &configuration.Configuration{
+		Storage: configuration.Storage{
+			"filesystem": configuration.Parameters{
+				"rootdirectory": layerCacheDir,
+			},
+		},
+	}
+	cfg.Log.Level = "error"
+
+	// Spawn an in-memory registry to cache built layers...
+	registry := handlers.NewApp(ctx, cfg)
+
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		return "", noop, fmt.Errorf("start listener for in-memory registry: %w", err)
+	}
+	tcpAddr, ok := listener.Addr().(*net.TCPAddr)
+	if !ok {
+		return "", noop, fmt.Errorf("listener addr was of wrong type: %T", listener.Addr())
+	}
+	srv := &http.Server{
+		Handler: registry,
+	}
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		err := srv.Serve(listener)
+		if err != nil && !errors.Is(err, http.ErrServerClosed) {
+			logf(log.LevelError, "Failed to serve registry: %s", err.Error())
+		}
+	}()
+	var closeOnce sync.Once
+	closer := func() {
+		closeOnce.Do(func() {
+			_ = srv.Close()
+			_ = listener.Close()
+			<-done
+		})
+	}
+	addr := fmt.Sprintf("localhost:%d/local/cache", tcpAddr.Port)
+	return addr, closer, nil
+}
diff --git a/envbuilder_test.go b/envbuilder_test.go
deleted file mode 100644
index aa4205c7..00000000
--- a/envbuilder_test.go
+++ /dev/null
@@ -1 +0,0 @@
-package envbuilder_test
diff --git a/log/log.go b/log/log.go
index da308266..8519d6b0 100644
--- a/log/log.go
+++ b/log/log.go
@@ -1,6 +1,7 @@
 package log
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"strings"
@@ -45,3 +46,33 @@ func Wrap(fs ...Func) Func {
 		}
 	}
 }
+
+// Writer returns an io.Writer that logs all writes in a separate goroutine.
+// It is the responsibility of the caller to call the returned
+// function to stop the goroutine.
+func Writer(logf Func) (io.Writer, func()) {
+	pipeReader, pipeWriter := io.Pipe()
+	doneCh := make(chan struct{})
+	go func() {
+		defer pipeWriter.Close()
+		defer pipeReader.Close()
+		scanner := bufio.NewScanner(pipeReader)
+		for {
+			select {
+			case <-doneCh:
+				return
+			default:
+				if !scanner.Scan() {
+					return
+				}
+				logf(LevelInfo, "%s", scanner.Text())
+			}
+		}
+	}()
+	closer := func() {
+		close(doneCh)
+		// Close the write end so the scanner goroutine unblocks and exits.
+		_ = pipeWriter.Close()
+	}
+	return pipeWriter, closer
+}
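Usage note (illustrative sketch only, not part of the change): the new log.Writer helper returns an io.Writer whose writes are scanned line by line in a background goroutine and forwarded to the supplied log.Func, which is how the Kaniko RunStdout/RunStderr streams are wired up above; the caller must invoke the returned stop function. The snippet below assumes the module path github.com/coder/envbuilder/log and the log.New(io.Writer, bool) constructor already used in cmd/envbuilder/main.go.

package main

import (
	"fmt"
	"os"

	"github.com/coder/envbuilder/log"
)

func main() {
	// Build a log.Func the same way cmd/envbuilder/main.go does.
	logf := log.New(os.Stderr, true)

	// Writer forwards anything written to it, line by line, to logf
	// from a separate goroutine.
	w, closeWriter := log.Writer(logf)
	// The caller is responsible for stopping that goroutine.
	defer closeWriter()

	// Each line written here is emitted as a LevelInfo log entry.
	_, _ = fmt.Fprintln(w, "streamed build output goes here")
}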