diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index f3ffc86..c106dd0 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -8,7 +8,7 @@ on:
   pull_request:
   workflow_dispatch:
 
-
+
 permissions:
   actions: read
   checks: none
@@ -209,7 +209,7 @@ jobs:
           category: "Trivy"
 
       - name: Upload Trivy scan results as an artifact
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: trivy
           path: trivy-results.sarif
diff --git a/Makefile b/Makefile
index a2c8341..b230ab7 100644
--- a/Makefile
+++ b/Makefile
@@ -39,4 +39,4 @@ test:
 
 .PHONY: test-integration
 test-integration:
-	go test -v -count=1 -tags=integration ./integration/
+	CODER_TEST_INTEGRATION=1 go test -v -count=1 ./integration/
diff --git a/README.md b/README.md
index fba30bc..437e381 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,7 @@ The environment variables can be used to configure various aspects of the inner
 | `CODER_DOCKER_BRIDGE_CIDR` | The bridge CIDR to start the Docker daemon with. | false |
 | `CODER_MOUNTS` | A list of mounts to mount into the inner container. Mounts default to `rw`. Ex: `CODER_MOUNTS=/home/coder:/home/coder,/var/run/mysecret:/var/run/mysecret:ro` | false |
 | `CODER_USR_LIB_DIR` | The mountpoint of the host `/usr/lib` directory. Only required when using GPUs. | false |
+| `CODER_INNER_USR_LIB_DIR` | The inner /usr/lib mountpoint. This is automatically detected based on `/etc/os-release` in the inner image, but may optionally be overridden. | false |
 | `CODER_ADD_TUN` | If `CODER_ADD_TUN=true` add a TUN device to the inner container. | false |
 | `CODER_ADD_FUSE` | If `CODER_ADD_FUSE=true` add a FUSE device to the inner container. | false |
 | `CODER_ADD_GPU` | If `CODER_ADD_GPU=true` add detected GPUs and related files to the inner container. Requires setting `CODER_USR_LIB_DIR` and mounting in the hosts `/usr/lib/` directory. | false |
@@ -43,7 +44,7 @@ It is not possible to develop `envbox` effectively using a containerized environ
 
 If a login is required to pull images from a private repository, create a secret following the instructions from the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-by-providing-credentials-on-the-command-line) as such:
 
-```
+```shell
 kubectl -n <namespace> create secret docker-registry regcred \
   --docker-server=<registry> \
   --docker-username=<username> \
@@ -53,7 +54,7 @@ kubectl -n <namespace> create secret docker-registry regcred \
 
 Then reference the secret in your template as such:
 
-```
+```hcl
 env {
   name = "CODER_IMAGE_PULL_SECRET"
   value_from {
@@ -93,11 +94,60 @@ When passing through GPUs to the inner container, you may end up using associate
 
 Envbox will detect these mounts and pass them inside the inner container it creates, so that GPU-aware tools run inside the inner container can still utilize these libraries.
 
+Here's an example Docker command to run a GPU-enabled workload in Envbox. Note the following:
+
+1) The NVIDIA container runtime must be installed on the host (`--runtime=nvidia`).
+2) `CODER_ADD_GPU=true` must be set to enable GPU-specific functionality.
+3) When `CODER_ADD_GPU` is set, you must also set `CODER_USR_LIB_DIR` to a location where the relevant host directory has been mounted inside the outer container. In the example below, this is `/usr/lib/x86_64-linux-gnu` on the underlying host, mounted into the container under the path `/var/coder/usr/lib`. We then set `CODER_USR_LIB_DIR=/var/coder/usr/lib`. The actual location inside the container is not important **as long as it does not overwrite any pre-existing directories containing system libraries**.
+4) The location to mount the libraries in the inner container is determined by the distribution ID in the `/etc/os-release` file of the inner container. If the automatically determined location is incorrect (e.g. `nvidia-smi` complains about not being able to find libraries), you can set it manually via `CODER_INNER_USR_LIB_DIR`.
+
+   > Note: this step is only required if user workloads need libraries from the underlying host that are not added in by the container runtime.
+
+```shell
+docker run -it --rm \
+  --runtime=nvidia \
+  --gpus=all \
+  --name=envbox-gpu-test \
+  -v /tmp/envbox/docker:/var/lib/coder/docker \
+  -v /tmp/envbox/containers:/var/lib/coder/containers \
+  -v /tmp/envbox/sysbox:/var/lib/sysbox \
+  -v /tmp/envbox/docker:/var/lib/docker \
+  -v /usr/src:/usr/src:ro \
+  -v /lib/modules:/lib/modules:ro \
+  -v /usr/lib/x86_64-linux-gnu:/var/coder/usr/lib \
+  --privileged \
+  -e CODER_INNER_IMAGE=nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2 \
+  -e CODER_INNER_USERNAME=root \
+  -e CODER_ADD_GPU=true \
+  -e CODER_USR_LIB_DIR=/var/coder/usr/lib \
+  envbox:latest /envbox docker
+```
+
+To validate GPU functionality, you can run the following commands:
+
+1) To validate that the container runtime correctly passed the required GPU tools into the outer container, run:
+
+   ```shell
+   docker exec -it envbox-gpu-test nvidia-smi
+   ```
+
+2) To validate the same inside the inner container, run:
+
+   ```shell
+   docker exec -it envbox-gpu-test docker exec -it workspace_cvm nvidia-smi
+   ```
+
+3) To validate that the sample CUDA application inside the container runs correctly:
+
+   ```shell
+   docker exec -it envbox-gpu-test docker exec -it workspace_cvm /tmp/vectorAdd
+   ```
+
 ## Hacking
 
 Here's a simple one-liner to run the `codercom/enterprise-minimal:ubuntu` image in Envbox using Docker:
 
-```
+```shell
 docker run -it --rm \
   -v /tmp/envbox/docker:/var/lib/coder/docker \
   -v /tmp/envbox/containers:/var/lib/coder/containers \
diff --git a/cli/docker.go b/cli/docker.go
index f48ea64..2f6ec5f 100644
--- a/cli/docker.go
+++ b/cli/docker.go
@@ -98,6 +98,7 @@ var (
 	EnvMemory               = "CODER_MEMORY"
 	EnvAddGPU               = "CODER_ADD_GPU"
 	EnvUsrLibDir            = "CODER_USR_LIB_DIR"
+	EnvInnerUsrLibDir       = "CODER_INNER_USR_LIB_DIR"
 	EnvDockerConfig         = "CODER_DOCKER_CONFIG"
 	EnvDebug                = "CODER_DEBUG"
 	EnvDisableIDMappedMount = "CODER_DISABLE_IDMAPPED_MOUNT"
@@ -135,6 +136,7 @@ type flags struct {
 	boostrapScript  string
 	containerMounts string
 	hostUsrLibDir   string
+	innerUsrLibDir  string
 	dockerConfig    string
 	cpus            int
 	memory          int
@@ -370,6 +372,7 @@ func dockerCmd() *cobra.Command {
 	cliflag.StringVarP(cmd.Flags(), &flags.boostrapScript, "boostrap-script", "", EnvBootstrap, "", "The script to use to bootstrap the container. This should typically install and start the agent.")
 	cliflag.StringVarP(cmd.Flags(), &flags.containerMounts, "mounts", "", EnvMounts, "", "Comma separated list of mounts in the form of '<source>:<target>[:options]' (e.g. /var/lib/docker:/var/lib/docker:ro,/usr/src:/usr/src).")
 	cliflag.StringVarP(cmd.Flags(), &flags.hostUsrLibDir, "usr-lib-dir", "", EnvUsrLibDir, "", "The host /usr/lib mountpoint. Used to detect GPU drivers to mount into inner container.")
+	cliflag.StringVarP(cmd.Flags(), &flags.innerUsrLibDir, "inner-usr-lib-dir", "", EnvInnerUsrLibDir, "", "The inner /usr/lib mountpoint. This is automatically detected based on /etc/os-release in the inner image, but may optionally be overridden.")
 	cliflag.StringVarP(cmd.Flags(), &flags.dockerConfig, "docker-config", "", EnvDockerConfig, "/root/.docker/config.json", "The path to the docker config to consult when pulling an image.")
 	cliflag.BoolVarP(cmd.Flags(), &flags.addTUN, "add-tun", "", EnvAddTun, false, "Add a TUN device to the inner container.")
 	cliflag.BoolVarP(cmd.Flags(), &flags.addFUSE, "add-fuse", "", EnvAddFuse, false, "Add a FUSE device to the inner container.")
@@ -523,7 +526,7 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Client
 	// of the user so that we can chown directories to the namespaced UID inside
 	// the inner container as well as whether we should be starting the container
 	// with /sbin/init or something simple like 'sleep infinity'.
-	imgMeta, err := dockerutil.GetImageMetadata(ctx, client, flags.innerImage, flags.innerUsername)
+	imgMeta, err := dockerutil.GetImageMetadata(ctx, log, client, flags.innerImage, flags.innerUsername)
 	if err != nil {
 		return xerrors.Errorf("get image metadata: %w", err)
 	}
@@ -534,6 +537,8 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Client
 		slog.F("uid", imgMeta.UID),
 		slog.F("gid", imgMeta.GID),
 		slog.F("has_init", imgMeta.HasInit),
+		slog.F("os_release", imgMeta.OsReleaseID),
+		slog.F("home_dir", imgMeta.HomeDir),
 	)
 
 	uid, err := strconv.ParseInt(imgMeta.UID, 10, 32)
@@ -614,16 +619,33 @@ func runDockerCVM(ctx context.Context, log slog.Logger, client dockerutil.Client
 		})
 	}
 
+	innerUsrLibDir := imgMeta.UsrLibDir()
+	if flags.innerUsrLibDir != "" {
+		log.Info(ctx, "overriding auto-detected inner usr lib dir",
+			slog.F("before", innerUsrLibDir),
+			slog.F("after", flags.innerUsrLibDir))
+		innerUsrLibDir = flags.innerUsrLibDir
+	}
 	for _, bind := range binds {
 		// If the bind has a path that points to the host-mounted /usr/lib
 		// directory we need to remap it to /usr/lib inside the container.
 		mountpoint := bind.Path
 		if strings.HasPrefix(mountpoint, flags.hostUsrLibDir) {
 			mountpoint = filepath.Join(
-				"/usr/lib",
+				// Note: we used to mount into /usr/lib, but this can change
+				// based on the distro inside the container.
+				innerUsrLibDir,
 				strings.TrimPrefix(mountpoint, strings.TrimSuffix(flags.hostUsrLibDir, "/")),
 			)
 		}
+		// Even though xunix.GPUs checks for duplicate mounts, we need to check
+		// for duplicates again here after remapping the path.
+		if slices.ContainsFunc(mounts, func(m xunix.Mount) bool {
+			return m.Mountpoint == mountpoint
+		}) {
+			log.Debug(ctx, "skipping duplicate mount", slog.F("path", mountpoint))
+			continue
+		}
 		mounts = append(mounts, xunix.Mount{
 			Source:     bind.Path,
 			Mountpoint: mountpoint,
diff --git a/dockerutil/image.go b/dockerutil/image.go
index ffb2bdb..a5323f2 100644
--- a/dockerutil/image.go
+++ b/dockerutil/image.go
@@ -9,6 +9,7 @@ import (
 	"strings"
 	"time"
 
+	"cdr.dev/slog"
 	dockertypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
@@ -22,6 +23,23 @@ import (
 
 const diskFullStorageDriver = "vfs"
 
+// Adapted from https://github.com/NVIDIA/libnvidia-container/blob/v1.15.0/src/nvc_container.c#L152-L165
+var UsrLibDirs = map[string]string{
+	// Debian-based distros use a multi-arch directory.
+	"debian": usrLibMultiarchDir,
+	"ubuntu": usrLibMultiarchDir,
+	// Fedora and Red Hat use the standard /usr/lib64.
+ "fedora": "/usr/lib64", + "rhel": "/usr/lib64", + // Fall back to the standard /usr/lib. + "linux": "/usr/lib", +} + +// /etc/os-release is the standard location for system identification data on +// Linux systems running systemd. +// Ref: https://www.freedesktop.org/software/systemd/man/latest/os-release.html +var etcOsRelease = "/etc/os-release" + type PullImageConfig struct { Client Client Image string @@ -148,15 +166,16 @@ func processImagePullEvents(r io.Reader, fn ImagePullProgressFn) error { } type ImageMetadata struct { - UID string - GID string - HomeDir string - HasInit bool + UID string + GID string + HomeDir string + HasInit bool + OsReleaseID string } // GetImageMetadata returns metadata about an image such as the UID/GID of the // provided username and whether it contains an /sbin/init that we should run. -func GetImageMetadata(ctx context.Context, client Client, img, username string) (ImageMetadata, error) { +func GetImageMetadata(ctx context.Context, log slog.Logger, client Client, img, username string) (ImageMetadata, error) { // Creating a dummy container to inspect the filesystem. created, err := client.ContainerCreate(ctx, &container.Config{ @@ -226,14 +245,58 @@ func GetImageMetadata(ctx context.Context, client Client, img, username string) return ImageMetadata{}, xerrors.Errorf("no users returned for username %s", username) } + // Read the /etc/os-release file to get the ID of the OS. + // We only care about the ID field. + var osReleaseID string + out, err = ExecContainer(ctx, client, ExecConfig{ + ContainerID: inspect.ID, + Cmd: "cat", + Args: []string{etcOsRelease}, + }) + if err != nil { + log.Error(ctx, "read os-release", slog.Error(err)) + log.Error(ctx, "falling back to linux for os-release ID") + osReleaseID = "linux" + } else { + osReleaseID = GetOSReleaseID(out) + } + return ImageMetadata{ - UID: users[0].Uid, - GID: users[0].Gid, - HomeDir: users[0].HomeDir, - HasInit: initExists, + UID: users[0].Uid, + GID: users[0].Gid, + HomeDir: users[0].HomeDir, + HasInit: initExists, + OsReleaseID: osReleaseID, }, nil } +// UsrLibDir returns the path to the /usr/lib directory for the given +// operating system determined by the /etc/os-release file. +func (im ImageMetadata) UsrLibDir() string { + if val, ok := UsrLibDirs[im.OsReleaseID]; ok && val != "" { + return val + } + return UsrLibDirs["linux"] // fallback +} + +// GetOSReleaseID returns the ID of the operating system from the +// raw contents of /etc/os-release. +func GetOSReleaseID(raw []byte) string { + var osReleaseID string + for _, line := range strings.Split(string(raw), "\n") { + if strings.HasPrefix(line, "ID=") { + osReleaseID = strings.TrimPrefix(line, "ID=") + // The value may be quoted. + osReleaseID = strings.Trim(osReleaseID, "\"") + break + } + } + if osReleaseID == "" { + return "linux" + } + return osReleaseID +} + func DefaultLogImagePullFn(log buildlog.Logger) func(ImagePullEvent) error { var ( // Avoid spamming too frequently, the messages can come quickly diff --git a/dockerutil/image_linux_amd64.go b/dockerutil/image_linux_amd64.go new file mode 100644 index 0000000..577c1be --- /dev/null +++ b/dockerutil/image_linux_amd64.go @@ -0,0 +1,5 @@ +package dockerutil + +// usrLibMultiarchDir is defined for arm64 and amd64 architectures. +// Envbox is not published for other architectures. 
+var usrLibMultiarchDir = "/usr/lib/aarch64-linux-gnu" diff --git a/dockerutil/image_linux_arm64.go b/dockerutil/image_linux_arm64.go new file mode 100644 index 0000000..5952a29 --- /dev/null +++ b/dockerutil/image_linux_arm64.go @@ -0,0 +1,5 @@ +package dockerutil + +// usrLibMultiarchDir is defined for arm64 and amd64 architectures. +// Envbox is not published for other architectures. +var usrLibMultiarchDir = "/usr/lib/x86_64-linux-gnu" diff --git a/integration/docker_test.go b/integration/docker_test.go index 6bf0f7e..533d2ba 100644 --- a/integration/docker_test.go +++ b/integration/docker_test.go @@ -1,6 +1,3 @@ -//go:build integration -// +build integration - package integration_test import ( @@ -24,6 +21,9 @@ import ( func TestDocker(t *testing.T) { t.Parallel() + if val, ok := os.LookupEnv("CODER_TEST_INTEGRATION"); !ok || val != "1" { + t.Skip("integration tests are skipped unless CODER_TEST_INTEGRATION=1") + } // Dockerd just tests that dockerd can spin up and function correctly. t.Run("Dockerd", func(t *testing.T) { diff --git a/integration/gpu_test.go b/integration/gpu_test.go new file mode 100644 index 0000000..47edef4 --- /dev/null +++ b/integration/gpu_test.go @@ -0,0 +1,202 @@ +package integration_test + +import ( + "context" + "os" + "os/exec" + "slices" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/coder/envbox/integration/integrationtest" +) + +func TestDocker_Nvidia(t *testing.T) { + t.Parallel() + if val, ok := os.LookupEnv("CODER_TEST_INTEGRATION"); !ok || val != "1" { + t.Skip("integration tests are skipped unless CODER_TEST_INTEGRATION=1") + } + // Only run this test if the nvidia container runtime is detected. + // Check if the nvidia runtime is available using `docker info`. + // The docker client doesn't expose this information so we need to fetch it ourselves. + if !slices.Contains(dockerRuntimes(t), "nvidia") { + t.Skip("this test requires nvidia runtime to be available") + } + + t.Run("Ubuntu", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Start the envbox container. + ctID := startEnvboxCmd(ctx, t, integrationtest.UbuntuImage, "root", + "-v", "/usr/lib/x86_64-linux-gnu:/var/coder/usr/lib", + "--env", "CODER_ADD_GPU=true", + "--env", "CODER_USR_LIB_DIR=/var/coder/usr/lib", + "--runtime=nvidia", + "--gpus=all", + ) + + // Assert that we can run nvidia-smi in the inner container. + _, err := execContainerCmd(ctx, t, ctID, "docker", "exec", "workspace_cvm", "nvidia-smi") + require.NoError(t, err, "failed to run nvidia-smi in the inner container") + }) + + t.Run("Redhat", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Start the envbox container. + ctID := startEnvboxCmd(ctx, t, integrationtest.RedhatImage, "root", + "-v", "/usr/lib/x86_64-linux-gnu:/var/coder/usr/lib64", + "--env", "CODER_ADD_GPU=true", + "--env", "CODER_USR_LIB_DIR=/var/coder/usr/lib64", + "--runtime=nvidia", + "--gpus=all", + ) + + // Assert that we can run nvidia-smi in the inner container. + _, err := execContainerCmd(ctx, t, ctID, "docker", "exec", "workspace_cvm", "nvidia-smi") + require.NoError(t, err, "failed to run nvidia-smi in the inner container") + }) + + t.Run("InnerUsrLibDirOverride", func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + // Start the envbox container. 
+		ctID := startEnvboxCmd(ctx, t, integrationtest.UbuntuImage, "root",
+			"-v", "/usr/lib/x86_64-linux-gnu:/var/coder/usr/lib",
+			"--env", "CODER_ADD_GPU=true",
+			"--env", "CODER_USR_LIB_DIR=/var/coder/usr/lib",
+			"--env", "CODER_INNER_USR_LIB_DIR=/usr/lib/coder",
+			"--runtime=nvidia",
+			"--gpus=all",
+		)
+
+		// Assert that the libraries end up in the expected location in the inner container.
+		out, err := execContainerCmd(ctx, t, ctID, "docker", "exec", "workspace_cvm", "ls", "-l", "/usr/lib/coder")
+		require.NoError(t, err, "inner usr lib dir override failed")
+		require.Regexp(t, `(?i)(libgl|nvidia|vulkan|cuda)`, out)
+	})
+}
+
+// dockerRuntimes returns the list of container runtimes available on the host.
+// It does this by running `docker info` and parsing the output.
+func dockerRuntimes(t *testing.T) []string {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cmd := exec.CommandContext(ctx, "docker", "info", "--format", "{{ range $k, $v := .Runtimes}}{{ println $k }}{{ end }}")
+	out, err := cmd.CombinedOutput()
+	require.NoError(t, err, "failed to get docker runtimes: %s", out)
+	raw := strings.TrimSpace(string(out))
+	return strings.Split(raw, "\n")
+}
+
+// startEnvboxCmd starts the envbox container with the given arguments.
+// Ideally we would use ory/dockertest for this, but it doesn't support
+// specifying the runtime. We have alternatively used the docker client library,
+// but a nice property of using the docker cli is that if a test fails, you can
+// just run the command manually to debug!
+func startEnvboxCmd(ctx context.Context, t *testing.T, innerImage, innerUser string, addlArgs ...string) (containerID string) {
+	t.Helper()
+
+	var (
+		tmpDir            = integrationtest.TmpDir(t)
+		binds             = integrationtest.DefaultBinds(t, tmpDir)
+		cancelCtx, cancel = context.WithCancel(ctx)
+	)
+	t.Cleanup(cancel)
+
+	// Unfortunately ory/dockertest does not allow us to specify runtime.
+	// We're instead going to just run the container directly via the docker cli.
+	startEnvboxArgs := []string{
+		"run",
+		"--detach",
+		"--rm",
+		"--privileged",
+		"--env", "CODER_INNER_IMAGE=" + innerImage,
+		"--env", "CODER_INNER_USERNAME=" + innerUser,
+	}
+	for _, bind := range binds {
+		bindParts := []string{bind.Source, bind.Target}
+		if bind.ReadOnly {
+			bindParts = append(bindParts, "ro")
+		}
+		startEnvboxArgs = append(startEnvboxArgs, []string{"-v", strings.Join(bindParts, ":")}...)
+	}
+	startEnvboxArgs = append(startEnvboxArgs, addlArgs...)
+	startEnvboxArgs = append(startEnvboxArgs, "envbox:latest", "/envbox", "docker")
+	t.Logf("envbox docker cmd: docker %s", strings.Join(startEnvboxArgs, " "))
+
+	// Start the envbox container without attaching.
+	startEnvboxCmd := exec.CommandContext(cancelCtx, "docker", startEnvboxArgs...)
+	out, err := startEnvboxCmd.CombinedOutput()
+	require.NoError(t, err, "failed to start envbox container")
+	containerID = strings.TrimSpace(string(out))
+	t.Logf("envbox container ID: %s", containerID)
+	t.Cleanup(func() {
+		if t.Failed() {
+			// Dump the logs if the test failed.
+			logsCmd := exec.Command("docker", "logs", containerID)
+			out, err := logsCmd.CombinedOutput()
+			if err != nil {
+				t.Logf("failed to read logs: %s", err)
+			}
+			t.Logf("envbox logs:\n%s", string(out))
+		}
+		// Stop the envbox container.
+		stopEnvboxCmd := exec.Command("docker", "rm", "-f", containerID)
+		out, err := stopEnvboxCmd.CombinedOutput()
+		if err != nil {
+			t.Errorf("failed to stop envbox container: %s", out)
+		}
+	})
+
+	// Wait for the Docker CVM to come up.
+	waitCtx, waitCancel := context.WithTimeout(cancelCtx, 5*time.Minute)
+	defer waitCancel()
+WAITLOOP:
+	for {
+		select {
+		case <-waitCtx.Done():
+			t.Fatal("timed out waiting for inner container to come up")
+		default:
+			execCmd := exec.CommandContext(cancelCtx, "docker", "exec", containerID, "docker", "inspect", "workspace_cvm")
+			out, err := execCmd.CombinedOutput()
+			if err != nil {
+				t.Logf("waiting for inner container to come up:\n%s", string(out))
+				<-time.After(time.Second)
+				continue WAITLOOP
+			}
+			t.Logf("inner container is up")
+			break WAITLOOP
+		}
+	}
+
+	return containerID
+}
+
+func execContainerCmd(ctx context.Context, t *testing.T, containerID string, cmdArgs ...string) (string, error) {
+	t.Helper()
+
+	execArgs := []string{"exec", containerID}
+	execArgs = append(execArgs, cmdArgs...)
+	t.Logf("exec cmd: docker %s", strings.Join(execArgs, " "))
+	execCmd := exec.CommandContext(ctx, "docker", execArgs...)
+	out, err := execCmd.CombinedOutput()
+	if err != nil {
+		t.Logf("exec cmd failed: %s\n%s", err.Error(), string(out))
+	} else {
+		t.Logf("exec cmd success: %s", out)
+	}
+	return strings.TrimSpace(string(out)), err
+}
diff --git a/integration/integrationtest/docker.go b/integration/integrationtest/docker.go
index 6b5e07c..4390ee2 100644
--- a/integration/integrationtest/docker.go
+++ b/integration/integrationtest/docker.go
@@ -40,6 +40,8 @@ const (
 	// UbuntuImage is just vanilla ubuntu (80MB) but the user is set to a non-root
 	// user .
 	UbuntuImage = "gcr.io/coder-dev-1/sreya/ubuntu-coder"
+	// RedhatImage is the Red Hat UBI9 image as of 2025-03-05.
+	RedhatImage = "registry.access.redhat.com/ubi9/ubi:9.5"
 	// RegistryImage is used to assert that we add certs
 	// correctly to the docker daemon when pulling an image