
Update Go version and dependencies

Changes:
- Update Go version to 1.25.3
- Update dependencies to mitigate CVEs
- Fix tests broken by API changes introduced by the update
- Remove/replace deprecated methods
- Update the Git command to ignore pre-commit hooks (see the sketch after the sign-off)
- Update the Go builder image to 1.25 in Dockerfiles and OpenShift CI
- Update Tekton pipeline task bundles to use the updated Go image

Signed-off-by: Sayan Biswas <sayan-biswas@live.com>
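
A note on the pre-commit hook change: the diff below does not show the Git invocation itself, but the standard mechanism looks like this minimal Go sketch, assuming the usual git flags — --no-verify skips the pre-commit and commit-msg hooks, and overriding core.hooksPath disables any globally configured hooks (the paths here are hypothetical):

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Create a commit while ignoring client-side hooks.
	cmd := exec.Command("git",
		"-c", "core.hooksPath=/dev/null", // no hook scripts can be found here
		"commit", "--no-verify", // skip pre-commit and commit-msg hooks
		"-m", "test fixture commit")
	cmd.Dir = "/tmp/test-repo" // hypothetical working tree
	if out, err := cmd.CombinedOutput(); err != nil {
		fmt.Printf("git commit failed: %v\n%s", err, out)
	}
}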
Sayan Biswas authored on 2025-11-17 02:05:40 +05:30, committed by Sayan Biswas
parent 45635f9ec8
commit 09956e52a1
598 changed files with 14137 additions and 10285 deletions

View File

@@ -1,4 +1,4 @@
build_root_image:
name: release
namespace: openshift
tag: rhel-9-golang-1.23-openshift-4.19
tag: rhel-9-golang-1.25-openshift-4.21

View File

@@ -138,14 +138,6 @@ spec:
description: Set 'true' to run unit tests
name: run-unit-test
type: string
-- description: The command used to run the unit test
-  name: unit-test-command
-  type: string
-  default: make test
-- description: The base image used to run the unit tests
-  name: unit-test-base-image
-  type: string
-  default: registry.redhat.io/ubi9/go-toolset:1.23
- default: docker
description: The format for the resulting image's mediaType. Valid values are oci (default) or docker.
name: buildah-format
@@ -191,7 +183,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:abf231cfc5a68b56f68a8ac9bb26dca3c3e434c88dd9627c72bdec0b8c335c67
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:4072de81ade0a75ad1eaa5449a7ff02bba84757064549a81b48c28fab3aeca59
- name: kind
value: task
resolver: bundles
@@ -212,7 +204,7 @@ spec:
- name: name
value: git-clone-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:3f1b468066b301083d8550e036f5a654fcb064810bd29eb06fec6d8ad3e35b9c
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:ea64f5b99202621e78ed3d74b00df5750cbf572c391e6da1956396f5945e4e11
- name: kind
value: task
resolver: bundles
@@ -243,7 +235,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:dc82a7270aace9b1c26f7e96f8ccab2752e53d32980c41a45e1733baad76cde6
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:9dbb38efdfca525b00dc502acf44723ac4a6c413bb2ab97459a13cd3a6056f17
- name: kind
value: task
resolver: bundles
@@ -262,10 +254,6 @@ spec:
value: $(tasks.clone-repository.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
-- name: TEST_COMMAND
-  value: $(params.unit-test-command)
-- name: BASE_IMAGE
-  value: $(params.unit-test-base-image)
when:
- input: $(params.run-unit-test)
operator: in
@@ -313,7 +301,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.5@sha256:650b0bca57c626c1e82f35cdfadf44a7792230b2b992aaa9c369d615aae6590d
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:c597a9f523b1115a88b9910267dd8f71057b0fa4f78e3dadf5a5c0affc5ea773
- name: kind
value: task
resolver: bundles
@@ -344,7 +332,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:79784d53749584bc5a8de32142ec4e2f01cdbf42c20d94e59280e0b927c8597d
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:0e90cf8259c7f54baad27d2a538294115f725ceb269ef789957fe68790803cbd
- name: kind
value: task
resolver: bundles
@@ -370,7 +358,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b0d6cb28a23f20db4f5cf78ed78ae3a91b9a5adfe989696ed0bbc63840a485b6
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:f62ef32f7d25f0ee50904b57b160e3fd5403fab5ec040c7aa99f5982fdd92ef4
- name: kind
value: task
resolver: bundles
@@ -396,7 +384,7 @@ spec:
- name: name
value: deprecated-image-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:f59175d9a0a60411738228dfe568af4684af4aa5e7e05c832927cb917801d489
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:462baed733dfc38aca5395499e92f19b6f13a74c2e88fe5d86c3cffa2f899b57
- name: kind
value: task
resolver: bundles
@@ -418,7 +406,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:a7cc183967f89c4ac100d04ab8f81e54733beee60a0528208107c9a22d3c43af
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:8ec7d7b9438ace5ef3fb03a533d9440d0fd81e51c73b0dc1eb51602fb7cd044e
- name: kind
value: task
resolver: bundles
@@ -438,7 +426,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:dae8e28761cee4ab0baf04ab9f8f1a4b3cee3c7decf461fda2bacc5c01652a60
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e
- name: kind
value: task
resolver: bundles
@@ -464,7 +452,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:783f5de1b4def2fb3fad20b914f4b3afee46ffb8f652114946e321ef3fa86449
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:8ad28b7783837a24acbc9a8494c935e796e591ce476085ad5899bebd7e53f077
- name: kind
value: task
resolver: bundles
@@ -486,7 +474,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:b0bd59748cda4a7abf311e4f448e6c1d00c6b6d8c0ecc1c2eb33e08dc0e0b802
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3
- name: kind
value: task
resolver: bundles
@@ -531,7 +519,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:cdbe1a968676e4f5519b082bf1e27a4cdcf66dd60af66dbc26b3e604f957f7e9
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:78f5244a8cfd28c890ed62db7e4ff1fc97ff39876d37fb19f1b0c2c286a4002c
- name: kind
value: task
resolver: bundles
@@ -552,7 +540,7 @@ spec:
- name: name
value: coverity-availability-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:db2b267dc15e4ed17f704ee91b8e9b38068e1a35b1018a328fdca621819d74c6
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:36400873d3031df128c55aa71ee11d322c3e55fd8f13dc5779098fbc117c0aa3
- name: kind
value: task
resolver: bundles
@@ -578,7 +566,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e
- name: kind
value: task
resolver: bundles
@@ -604,7 +592,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176
- name: kind
value: task
resolver: bundles
@@ -628,7 +616,7 @@ spec:
- name: name
value: apply-tags
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:f44be1bf0262471f2f503f5e19da5f0628dcaf968c86272a2ad6b4871e708448
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:ade0bf9c2e9c169f588fbfe71fb489c2f7053fe41884e7969f270b317d9eb548
- name: kind
value: task
resolver: bundles
@@ -651,7 +639,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:49f778479f468e71c2cfef722e96aa813d7ef98bde8a612e1bf1a13cd70849ec
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e
- name: kind
value: task
resolver: bundles
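
Every task bundle in the pipeline above is pinned by sha256 digest rather than by a floating tag, which is why each bundle bump appears as an old/new value pair. Here is a sketch of how a digest-pinned reference can be checked programmatically, using github.com/distribution/reference (already a direct dependency of this module); the reference string is the updated task-init bundle from the diff above:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref := "quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:4072de81ade0a75ad1eaa5449a7ff02bba84757064549a81b48c28fab3aeca59"

	named, err := reference.ParseNormalizedNamed(ref)
	if err != nil {
		fmt.Println("invalid reference:", err)
		return
	}
	// Only a digest-pinned (canonical) reference is fully reproducible.
	if canonical, ok := named.(reference.Canonical); ok {
		fmt.Println("pinned to digest:", canonical.Digest())
	} else {
		fmt.Println("warning: reference is not digest-pinned")
	}
}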

View File

@@ -137,14 +137,6 @@ spec:
description: Set 'true' to run unit tests
name: run-unit-test
type: string
-- description: The command used to run the unit test
-  name: unit-test-command
-  type: string
-  default: make test
-- description: The base image used to run the unit tests
-  name: unit-test-base-image
-  type: string
-  default: registry.redhat.io/ubi9/go-toolset:1.23
- default: docker
description: The format for the resulting image's mediaType. Valid values are oci (default) or docker.
name: buildah-format
@@ -190,7 +182,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:abf231cfc5a68b56f68a8ac9bb26dca3c3e434c88dd9627c72bdec0b8c335c67
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:4072de81ade0a75ad1eaa5449a7ff02bba84757064549a81b48c28fab3aeca59
- name: kind
value: task
resolver: bundles
@@ -211,7 +203,7 @@ spec:
- name: name
value: git-clone-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:3f1b468066b301083d8550e036f5a654fcb064810bd29eb06fec6d8ad3e35b9c
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:ea64f5b99202621e78ed3d74b00df5750cbf572c391e6da1956396f5945e4e11
- name: kind
value: task
resolver: bundles
@@ -242,7 +234,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:dc82a7270aace9b1c26f7e96f8ccab2752e53d32980c41a45e1733baad76cde6
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:9dbb38efdfca525b00dc502acf44723ac4a6c413bb2ab97459a13cd3a6056f17
- name: kind
value: task
resolver: bundles
@@ -261,10 +253,6 @@ spec:
value: $(tasks.clone-repository.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
-- name: TEST_COMMAND
-  value: $(params.unit-test-command)
-- name: BASE_IMAGE
-  value: $(params.unit-test-base-image)
when:
- input: $(params.run-unit-test)
operator: in
@@ -312,7 +300,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.5@sha256:650b0bca57c626c1e82f35cdfadf44a7792230b2b992aaa9c369d615aae6590d
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:c597a9f523b1115a88b9910267dd8f71057b0fa4f78e3dadf5a5c0affc5ea773
- name: kind
value: task
resolver: bundles
@@ -343,7 +331,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:79784d53749584bc5a8de32142ec4e2f01cdbf42c20d94e59280e0b927c8597d
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:0e90cf8259c7f54baad27d2a538294115f725ceb269ef789957fe68790803cbd
- name: kind
value: task
resolver: bundles
@@ -369,7 +357,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b0d6cb28a23f20db4f5cf78ed78ae3a91b9a5adfe989696ed0bbc63840a485b6
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:f62ef32f7d25f0ee50904b57b160e3fd5403fab5ec040c7aa99f5982fdd92ef4
- name: kind
value: task
resolver: bundles
@@ -395,7 +383,7 @@ spec:
- name: name
value: deprecated-image-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:f59175d9a0a60411738228dfe568af4684af4aa5e7e05c832927cb917801d489
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:462baed733dfc38aca5395499e92f19b6f13a74c2e88fe5d86c3cffa2f899b57
- name: kind
value: task
resolver: bundles
@@ -417,7 +405,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:a7cc183967f89c4ac100d04ab8f81e54733beee60a0528208107c9a22d3c43af
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:8ec7d7b9438ace5ef3fb03a533d9440d0fd81e51c73b0dc1eb51602fb7cd044e
- name: kind
value: task
resolver: bundles
@@ -437,7 +425,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:dae8e28761cee4ab0baf04ab9f8f1a4b3cee3c7decf461fda2bacc5c01652a60
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e
- name: kind
value: task
resolver: bundles
@@ -463,7 +451,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:783f5de1b4def2fb3fad20b914f4b3afee46ffb8f652114946e321ef3fa86449
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:8ad28b7783837a24acbc9a8494c935e796e591ce476085ad5899bebd7e53f077
- name: kind
value: task
resolver: bundles
@@ -485,7 +473,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:b0bd59748cda4a7abf311e4f448e6c1d00c6b6d8c0ecc1c2eb33e08dc0e0b802
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3
- name: kind
value: task
resolver: bundles
@@ -530,7 +518,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:cdbe1a968676e4f5519b082bf1e27a4cdcf66dd60af66dbc26b3e604f957f7e9
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:78f5244a8cfd28c890ed62db7e4ff1fc97ff39876d37fb19f1b0c2c286a4002c
- name: kind
value: task
resolver: bundles
@@ -551,7 +539,7 @@ spec:
- name: name
value: coverity-availability-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:db2b267dc15e4ed17f704ee91b8e9b38068e1a35b1018a328fdca621819d74c6
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:36400873d3031df128c55aa71ee11d322c3e55fd8f13dc5779098fbc117c0aa3
- name: kind
value: task
resolver: bundles
@@ -577,7 +565,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e
- name: kind
value: task
resolver: bundles
@@ -603,7 +591,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176
- name: kind
value: task
resolver: bundles
@@ -627,7 +615,7 @@ spec:
- name: name
value: apply-tags
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:f44be1bf0262471f2f503f5e19da5f0628dcaf968c86272a2ad6b4871e708448
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:ade0bf9c2e9c169f588fbfe71fb489c2f7053fe41884e7969f270b317d9eb548
- name: kind
value: task
resolver: bundles
@@ -650,7 +638,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:49f778479f468e71c2cfef722e96aa813d7ef98bde8a612e1bf1a13cd70849ec
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e
- name: kind
value: task
resolver: bundles

View File

@@ -138,14 +138,6 @@ spec:
description: Set 'true' to run unit tests
name: run-unit-test
type: string
-- description: The command used to run the unit test
-  name: unit-test-command
-  type: string
-  default: make test
-- description: The base image used to run the unit tests
-  name: unit-test-base-image
-  type: string
-  default: registry.redhat.io/ubi9/go-toolset:1.23
- default: docker
description: The format for the resulting image's mediaType. Valid values are oci (default) or docker.
name: buildah-format
@@ -191,7 +183,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:abf231cfc5a68b56f68a8ac9bb26dca3c3e434c88dd9627c72bdec0b8c335c67
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:4072de81ade0a75ad1eaa5449a7ff02bba84757064549a81b48c28fab3aeca59
- name: kind
value: task
resolver: bundles
@@ -212,7 +204,7 @@ spec:
- name: name
value: git-clone-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:3f1b468066b301083d8550e036f5a654fcb064810bd29eb06fec6d8ad3e35b9c
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:ea64f5b99202621e78ed3d74b00df5750cbf572c391e6da1956396f5945e4e11
- name: kind
value: task
resolver: bundles
@@ -243,7 +235,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:dc82a7270aace9b1c26f7e96f8ccab2752e53d32980c41a45e1733baad76cde6
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:9dbb38efdfca525b00dc502acf44723ac4a6c413bb2ab97459a13cd3a6056f17
- name: kind
value: task
resolver: bundles
@@ -262,10 +254,6 @@ spec:
value: $(tasks.clone-repository.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
-- name: TEST_COMMAND
-  value: $(params.unit-test-command)
-- name: BASE_IMAGE
-  value: $(params.unit-test-base-image)
when:
- input: $(params.run-unit-test)
operator: in
@@ -313,7 +301,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.5@sha256:650b0bca57c626c1e82f35cdfadf44a7792230b2b992aaa9c369d615aae6590d
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:c597a9f523b1115a88b9910267dd8f71057b0fa4f78e3dadf5a5c0affc5ea773
- name: kind
value: task
resolver: bundles
@@ -344,7 +332,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:79784d53749584bc5a8de32142ec4e2f01cdbf42c20d94e59280e0b927c8597d
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:0e90cf8259c7f54baad27d2a538294115f725ceb269ef789957fe68790803cbd
- name: kind
value: task
resolver: bundles
@@ -370,7 +358,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b0d6cb28a23f20db4f5cf78ed78ae3a91b9a5adfe989696ed0bbc63840a485b6
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:f62ef32f7d25f0ee50904b57b160e3fd5403fab5ec040c7aa99f5982fdd92ef4
- name: kind
value: task
resolver: bundles
@@ -396,7 +384,7 @@ spec:
- name: name
value: deprecated-image-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:f59175d9a0a60411738228dfe568af4684af4aa5e7e05c832927cb917801d489
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:462baed733dfc38aca5395499e92f19b6f13a74c2e88fe5d86c3cffa2f899b57
- name: kind
value: task
resolver: bundles
@@ -418,7 +406,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:a7cc183967f89c4ac100d04ab8f81e54733beee60a0528208107c9a22d3c43af
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:8ec7d7b9438ace5ef3fb03a533d9440d0fd81e51c73b0dc1eb51602fb7cd044e
- name: kind
value: task
resolver: bundles
@@ -438,7 +426,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:dae8e28761cee4ab0baf04ab9f8f1a4b3cee3c7decf461fda2bacc5c01652a60
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e
- name: kind
value: task
resolver: bundles
@@ -464,7 +452,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:783f5de1b4def2fb3fad20b914f4b3afee46ffb8f652114946e321ef3fa86449
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:8ad28b7783837a24acbc9a8494c935e796e591ce476085ad5899bebd7e53f077
- name: kind
value: task
resolver: bundles
@@ -486,7 +474,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:b0bd59748cda4a7abf311e4f448e6c1d00c6b6d8c0ecc1c2eb33e08dc0e0b802
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3
- name: kind
value: task
resolver: bundles
@@ -531,7 +519,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:cdbe1a968676e4f5519b082bf1e27a4cdcf66dd60af66dbc26b3e604f957f7e9
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:78f5244a8cfd28c890ed62db7e4ff1fc97ff39876d37fb19f1b0c2c286a4002c
- name: kind
value: task
resolver: bundles
@@ -552,7 +540,7 @@ spec:
- name: name
value: coverity-availability-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:db2b267dc15e4ed17f704ee91b8e9b38068e1a35b1018a328fdca621819d74c6
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:36400873d3031df128c55aa71ee11d322c3e55fd8f13dc5779098fbc117c0aa3
- name: kind
value: task
resolver: bundles
@@ -578,7 +566,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e
- name: kind
value: task
resolver: bundles
@@ -604,7 +592,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176
- name: kind
value: task
resolver: bundles
@@ -628,7 +616,7 @@ spec:
- name: name
value: apply-tags
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:f44be1bf0262471f2f503f5e19da5f0628dcaf968c86272a2ad6b4871e708448
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:ade0bf9c2e9c169f588fbfe71fb489c2f7053fe41884e7969f270b317d9eb548
- name: kind
value: task
resolver: bundles
@@ -651,7 +639,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:49f778479f468e71c2cfef722e96aa813d7ef98bde8a612e1bf1a13cd70849ec
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e
- name: kind
value: task
resolver: bundles

View File

@@ -137,14 +137,6 @@ spec:
description: Set 'true' to run unit tests
name: run-unit-test
type: string
-- description: The command used to run the unit test
-  name: unit-test-command
-  type: string
-  default: make test
-- description: The base image used to run the unit tests
-  name: unit-test-base-image
-  type: string
-  default: registry.redhat.io/ubi9/go-toolset:1.23
- default: docker
description: The format for the resulting image's mediaType. Valid values are oci (default) or docker.
name: buildah-format
@@ -190,7 +182,7 @@ spec:
- name: name
value: init
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:abf231cfc5a68b56f68a8ac9bb26dca3c3e434c88dd9627c72bdec0b8c335c67
value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:4072de81ade0a75ad1eaa5449a7ff02bba84757064549a81b48c28fab3aeca59
- name: kind
value: task
resolver: bundles
@@ -211,7 +203,7 @@ spec:
- name: name
value: git-clone-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:3f1b468066b301083d8550e036f5a654fcb064810bd29eb06fec6d8ad3e35b9c
value: quay.io/konflux-ci/tekton-catalog/task-git-clone-oci-ta:0.1@sha256:ea64f5b99202621e78ed3d74b00df5750cbf572c391e6da1956396f5945e4e11
- name: kind
value: task
resolver: bundles
@@ -242,7 +234,7 @@ spec:
- name: name
value: prefetch-dependencies-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:dc82a7270aace9b1c26f7e96f8ccab2752e53d32980c41a45e1733baad76cde6
value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies-oci-ta:0.2@sha256:9dbb38efdfca525b00dc502acf44723ac4a6c413bb2ab97459a13cd3a6056f17
- name: kind
value: task
resolver: bundles
@@ -261,10 +253,6 @@ spec:
value: $(tasks.clone-repository.results.SOURCE_ARTIFACT)
- name: CACHI2_ARTIFACT
value: $(tasks.prefetch-dependencies.results.CACHI2_ARTIFACT)
-- name: TEST_COMMAND
-  value: $(params.unit-test-command)
-- name: BASE_IMAGE
-  value: $(params.unit-test-base-image)
when:
- input: $(params.run-unit-test)
operator: in
@@ -312,7 +300,7 @@ spec:
- name: name
value: buildah-remote-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.5@sha256:650b0bca57c626c1e82f35cdfadf44a7792230b2b992aaa9c369d615aae6590d
value: quay.io/konflux-ci/tekton-catalog/task-buildah-remote-oci-ta:0.7@sha256:c597a9f523b1115a88b9910267dd8f71057b0fa4f78e3dadf5a5c0affc5ea773
- name: kind
value: task
resolver: bundles
@@ -343,7 +331,7 @@ spec:
- name: name
value: build-image-index
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:79784d53749584bc5a8de32142ec4e2f01cdbf42c20d94e59280e0b927c8597d
value: quay.io/konflux-ci/tekton-catalog/task-build-image-index:0.1@sha256:0e90cf8259c7f54baad27d2a538294115f725ceb269ef789957fe68790803cbd
- name: kind
value: task
resolver: bundles
@@ -369,7 +357,7 @@ spec:
- name: name
value: source-build-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:b0d6cb28a23f20db4f5cf78ed78ae3a91b9a5adfe989696ed0bbc63840a485b6
value: quay.io/konflux-ci/tekton-catalog/task-source-build-oci-ta:0.3@sha256:f62ef32f7d25f0ee50904b57b160e3fd5403fab5ec040c7aa99f5982fdd92ef4
- name: kind
value: task
resolver: bundles
@@ -395,7 +383,7 @@ spec:
- name: name
value: deprecated-image-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:f59175d9a0a60411738228dfe568af4684af4aa5e7e05c832927cb917801d489
value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.5@sha256:462baed733dfc38aca5395499e92f19b6f13a74c2e88fe5d86c3cffa2f899b57
- name: kind
value: task
resolver: bundles
@@ -417,7 +405,7 @@ spec:
- name: name
value: clair-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:a7cc183967f89c4ac100d04ab8f81e54733beee60a0528208107c9a22d3c43af
value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.3@sha256:8ec7d7b9438ace5ef3fb03a533d9440d0fd81e51c73b0dc1eb51602fb7cd044e
- name: kind
value: task
resolver: bundles
@@ -437,7 +425,7 @@ spec:
- name: name
value: ecosystem-cert-preflight-checks
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:dae8e28761cee4ab0baf04ab9f8f1a4b3cee3c7decf461fda2bacc5c01652a60
value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.2@sha256:04f75593558f79a27da2336400bc63d460bf0c5669e3c13f40ee2fb650b1ad1e
- name: kind
value: task
resolver: bundles
@@ -463,7 +451,7 @@ spec:
- name: name
value: sast-snyk-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:783f5de1b4def2fb3fad20b914f4b3afee46ffb8f652114946e321ef3fa86449
value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check-oci-ta:0.4@sha256:8ad28b7783837a24acbc9a8494c935e796e591ce476085ad5899bebd7e53f077
- name: kind
value: task
resolver: bundles
@@ -485,7 +473,7 @@ spec:
- name: name
value: clamav-scan
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:b0bd59748cda4a7abf311e4f448e6c1d00c6b6d8c0ecc1c2eb33e08dc0e0b802
value: quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.3@sha256:f3d2d179cddcc07d0228d9f52959a233037a3afa2619d0a8b2effbb467db80c3
- name: kind
value: task
resolver: bundles
@@ -530,7 +518,7 @@ spec:
- name: name
value: sast-coverity-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:cdbe1a968676e4f5519b082bf1e27a4cdcf66dd60af66dbc26b3e604f957f7e9
value: quay.io/konflux-ci/tekton-catalog/task-sast-coverity-check-oci-ta:0.3@sha256:78f5244a8cfd28c890ed62db7e4ff1fc97ff39876d37fb19f1b0c2c286a4002c
- name: kind
value: task
resolver: bundles
@@ -551,7 +539,7 @@ spec:
- name: name
value: coverity-availability-check
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:db2b267dc15e4ed17f704ee91b8e9b38068e1a35b1018a328fdca621819d74c6
value: quay.io/konflux-ci/tekton-catalog/task-coverity-availability-check:0.2@sha256:36400873d3031df128c55aa71ee11d322c3e55fd8f13dc5779098fbc117c0aa3
- name: kind
value: task
resolver: bundles
@@ -577,7 +565,7 @@ spec:
- name: name
value: sast-shell-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:bf7bdde00b7212f730c1356672290af6f38d070da2c8a316987b5c32fd49e0b9
value: quay.io/konflux-ci/tekton-catalog/task-sast-shell-check-oci-ta:0.1@sha256:d44336d7bcbd1f7cedee639357a493bd1f661e2859e49e11a34644bdf6819c4e
- name: kind
value: task
resolver: bundles
@@ -603,7 +591,7 @@ spec:
- name: name
value: sast-unicode-check-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:a2bde66f6b4164620298c7d709b8f08515409404000fa1dc2260d2508b135651
value: quay.io/konflux-ci/tekton-catalog/task-sast-unicode-check-oci-ta:0.3@sha256:e5a8d3e8e7be7246a1460385b95c084ea6e8fe7520d40fe4389deb90f1bf5176
- name: kind
value: task
resolver: bundles
@@ -627,7 +615,7 @@ spec:
- name: name
value: apply-tags
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:f44be1bf0262471f2f503f5e19da5f0628dcaf968c86272a2ad6b4871e708448
value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.2@sha256:ade0bf9c2e9c169f588fbfe71fb489c2f7053fe41884e7969f270b317d9eb548
- name: kind
value: task
resolver: bundles
@@ -650,7 +638,7 @@ spec:
- name: name
value: push-dockerfile-oci-ta
- name: bundle
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:49f778479f468e71c2cfef722e96aa813d7ef98bde8a612e1bf1a13cd70849ec
value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile-oci-ta:0.1@sha256:08bba4a659ecd48f871bef00b80af58954e5a09fcbb28a1783ddd640c4f6535e
- name: kind
value: task
resolver: bundles

View File

@@ -15,11 +15,11 @@ spec:
- description: The go command used to run the go unit test
name: TEST_COMMAND
type: string
default: go test ./...
default: make test
- description: The Go base image used to run the unit tests
name: BASE_IMAGE
type: string
default: registry.redhat.io/ubi9/go-toolset:1.23
default: registry.access.redhat.com/ubi10/go-toolset:1.25.3
stepTemplate:
volumeMounts:
- mountPath: /var/workdir

go.mod — 76 changed lines
View File

@@ -1,38 +1,37 @@
module github.com/openshift/source-to-image
go 1.23.3
toolchain go1.23.9
go 1.25.3
require (
github.com/containers/image/v5 v5.35.0
github.com/containerd/errdefs v1.0.0
github.com/containers/image/v5 v5.36.2
github.com/distribution/reference v0.6.0
github.com/docker/docker v28.1.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/docker v28.5.2+incompatible
github.com/docker/go-connections v0.6.0
github.com/go-imports-organizer/goio v1.5.0
github.com/moby/buildkit v0.22.0
github.com/moby/buildkit v0.26.2
github.com/moby/docker-image-spec v1.3.1
github.com/opencontainers/image-spec v1.1.1
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
golang.org/x/net v0.40.0
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
golang.org/x/net v0.47.0
k8s.io/klog/v2 v2.130.1
)
require (
dario.cat/mergo v1.0.1 // indirect
dario.cat/mergo v1.0.2 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/containerd/cgroups/v3 v3.0.5 // indirect
github.com/containerd/errdefs v1.0.0 // indirect
github.com/Microsoft/hcsshim v0.14.0-rc.1 // indirect
github.com/containerd/cgroups/v3 v3.1.1 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
github.com/containerd/typeurl/v2 v2.2.3 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/containers/ocicrypt v1.2.1 // indirect
github.com/containers/storage v1.58.0 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/containers/storage v1.59.1 // indirect
github.com/cyphar/filepath-securejoin v0.5.1 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/docker/go-units v0.5.0 // indirect
@@ -41,16 +40,15 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/go-containerregistry v0.20.3 // indirect
github.com/google/go-containerregistry v0.20.6 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.1.0 // indirect
github.com/moby/sys/capability v0.4.0 // indirect
github.com/moby/sys/mountinfo v0.7.2 // indirect
github.com/moby/sys/user v0.4.0 // indirect
@@ -59,27 +57,27 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runtime-spec v1.2.1 // indirect
github.com/opencontainers/selinux v1.12.0 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/opencontainers/runtime-spec v1.3.0 // indirect
github.com/opencontainers/selinux v1.13.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sylabs/sif/v2 v2.21.1 // indirect
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.12.1 // indirect
github.com/sylabs/sif/v2 v2.22.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/vbatts/tar-split v0.12.2 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.37.0 // indirect
go.opentelemetry.io/otel/metric v1.37.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.33.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba // indirect
google.golang.org/grpc v1.77.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
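
Two notes on the go.mod changes above. First, since Go 1.21 a go directive that names a full patch release (here, go 1.25.3) already sets the minimum required toolchain, so the separate toolchain line became redundant and was dropped. Second, for CVE mitigation what matters is the set of module versions actually compiled into the binary; the govulncheck tool works from that same information, and it can also be printed at runtime with the standard library, as in this small sketch (the main package itself is hypothetical):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Report the module versions linked into this binary, which is what
	// needs checking when verifying that a vulnerable dependency was bumped.
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info: binary was built without module support")
		return
	}
	fmt.Println("built with:", info.GoVersion)
	for _, dep := range info.Deps {
		fmt.Printf("%s %s\n", dep.Path, dep.Version)
	}
}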

go.sum — 210 changed lines
View File

@@ -1,8 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -10,58 +8,58 @@ github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/Microsoft/hcsshim v0.14.0-rc.1 h1:qAPXKwGOkVn8LlqgBN8GS0bxZ83hOJpcjxzmlQKxKsQ=
github.com/Microsoft/hcsshim v0.14.0-rc.1/go.mod h1:hTKFGbnDtQb1wHiOWv4v0eN+7boSWAHyK/tNAaYZL0c=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
github.com/containerd/cgroups/v3 v3.1.1 h1:ASZmQGfOHbRj43/1aMn5QcWIsv0R/AuHHDNCguRY0p0=
github.com/containerd/cgroups/v3 v3.1.1/go.mod h1:PKZ2AcWmSBsY/tJUVhtS/rluX0b1uq1GmPO1ElCmbOw=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
github.com/containers/image/v5 v5.35.0 h1:T1OeyWp3GjObt47bchwD9cqiaAm/u4O4R9hIWdrdrP8=
github.com/containers/image/v5 v5.35.0/go.mod h1:8vTsgb+1gKcBL7cnjyNOInhJQfTUQjJoO2WWkKDoebM=
github.com/containers/image/v5 v5.36.2 h1:GcxYQyAHRF/pLqR4p4RpvKllnNL8mOBn0eZnqJbfTwk=
github.com/containers/image/v5 v5.36.2/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc=
github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo=
github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs=
github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/cyphar/filepath-securejoin v0.5.1 h1:eYgfMq5yryL4fbWfkLpFFy2ukSELzaJOTaUTuh+oF48=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v28.5.0+incompatible h1:crVqLrtKsrhC9c00ythRx435H8LiQnUKRtJLRR+Auxk=
github.com/docker/cli v28.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I=
github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -105,8 +103,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -116,26 +114,26 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU=
github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/moby/buildkit v0.22.0 h1:aWN06w1YGSVN1XfeZbj2ZbgY+zi5xDAjEFI8Cy9fTjA=
github.com/moby/buildkit v0.22.0/go.mod h1:j4pP5hxiTWcz7xuTK2cyxQislHl/N2WWHzOy43DlLJw=
github.com/mistifyio/go-zfs/v3 v3.1.0 h1:FZaylcg0hjUp27i23VcJJQiuBeAZjrC8lPqCGM1CopY=
github.com/mistifyio/go-zfs/v3 v3.1.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k=
github.com/moby/buildkit v0.26.2 h1:EIh5j0gzRsCZmQzvgNNWzSDbuKqwUIiBH7ssqLv8RU8=
github.com/moby/buildkit v0.26.2/go.mod h1:ylDa7IqzVJgLdi/wO7H1qLREFQpmhFbw2fbn4yoTw40=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
@@ -163,12 +161,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/opencontainers/runtime-spec v1.3.0 h1:YZupQUdctfhpZy3TM39nN9Ika5CBWT5diQ8ibYCRkxg=
github.com/opencontainers/runtime-spec v1.3.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.13.1 h1:A8nNeceYngH9Ow++M+VVEwJVpdFmrlxsN22F+ISDCJE=
github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
@@ -176,28 +172,29 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/sebdah/goldie/v2 v2.7.1 h1:PkBHymaYdtvEkZV7TmyqKxdmn5/Vcj+8TpATWZjnG5E=
github.com/sebdah/goldie/v2 v2.7.1/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -206,38 +203,42 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/sylabs/sif/v2 v2.21.1 h1:GZ0b5//AFAqJEChd8wHV/uSKx/l1iuGYwjR8nx+4wPI=
github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI=
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/sylabs/sif/v2 v2.22.0 h1:Y+xXufp4RdgZe02SR3nWEg7S6q4tPWN237WHYzkDSKA=
github.com/sylabs/sif/v2 v2.22.0/go.mod h1:W1XhWTmG1KcG7j5a3KSYdMcUIFvbs240w/MMVW627hs=
github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc=
github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -247,8 +248,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -258,30 +259,30 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -299,18 +300,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba h1:B14OtaXuMaCQsl2deSvNkyPKIzq3BjfxQp8d00QyWx4=
google.golang.org/genproto/googleapis/api v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:G5IanEx8/PgI9w6CFcYQf7jMtHQhZruvfM1i3qOqk5U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba h1:UKgtfRM7Yh93Sya0Fo8ZzhDP4qBckrrxEr2oF5UIVb8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -320,8 +320,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -3,7 +3,7 @@
#
# The standard name for this image is openshift/sti-release
#
FROM registry.redhat.io/ubi9/go-toolset:1.23 AS builder
FROM registry.redhat.io/ubi9/go-toolset:1.24 AS builder
USER root
ENV S2I_VERSION_FILE=/opt/app-root/src/source-to-image/sti-version-defs

View File

@@ -7,8 +7,10 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/containerd/errdefs"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/common"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
@@ -21,7 +23,7 @@ import (
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
dockerimage "github.com/docker/docker/api/types/image"
dockernetwork "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
dockerapi "github.com/docker/docker/client"
@@ -79,7 +81,7 @@ const containerNamePrefix = "s2i"
// meant to resemble Kubernetes' pkg/kubelet/dockertools.BuildDockerName.
func containerName(image string) string {
//Initialize seed
rand.Seed(time.Now().UnixNano())
rand.NewSource(time.Now().UnixNano())
uid := fmt.Sprintf("%08x", rand.Uint32())
// Replace invalid characters for container name with underscores.
image = strings.Map(func(r rune) rune {
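A note on this hunk: rand.Seed was deprecated in Go 1.20, and the bare rand.NewSource call above discards its result, so the package-level generator simply stays auto-seeded. A minimal sketch (not part of this commit) of keeping an explicitly seeded generator instead:
```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Hold a local *rand.Rand when a specific seed matters; since Go 1.20
	// the global generator is seeded automatically and rand.Seed is deprecated.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	uid := fmt.Sprintf("%08x", r.Uint32())
	fmt.Println(uid)
}
```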
@@ -124,19 +126,19 @@ type Docker interface {
// Client contains all methods used when interacting directly with docker engine-api
type Client interface {
ContainerAttach(ctx context.Context, container string, options dockercontainer.AttachOptions) (dockertypes.HijackedResponse, error)
ContainerCommit(ctx context.Context, container string, options dockercontainer.CommitOptions) (dockertypes.IDResponse, error)
ContainerCommit(ctx context.Context, container string, options dockercontainer.CommitOptions) (common.IDResponse, error)
ContainerCreate(ctx context.Context, config *dockercontainer.Config, hostConfig *dockercontainer.HostConfig, networkingConfig *dockernetwork.NetworkingConfig, platform *v1.Platform, containerName string) (dockercontainer.CreateResponse, error)
ContainerInspect(ctx context.Context, container string) (dockertypes.ContainerJSON, error)
ContainerInspect(ctx context.Context, container string) (dockercontainer.InspectResponse, error)
ContainerRemove(ctx context.Context, container string, options dockercontainer.RemoveOptions) error
ContainerStart(ctx context.Context, container string, options dockercontainer.StartOptions) error
ContainerKill(ctx context.Context, container, signal string) error
ContainerWait(ctx context.Context, container string, condition dockercontainer.WaitCondition) (<-chan dockercontainer.WaitResponse, <-chan error)
CopyToContainer(ctx context.Context, container, path string, content io.Reader, opts dockercontainer.CopyToContainerOptions) error
CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, dockercontainer.PathStat, error)
ImageBuild(ctx context.Context, buildContext io.Reader, options dockertypes.ImageBuildOptions) (dockertypes.ImageBuildResponse, error)
ImageInspectWithRaw(ctx context.Context, image string) (dockertypes.ImageInspect, []byte, error)
ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error)
ImageBuild(ctx context.Context, buildContext io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error)
ImageInspectWithRaw(ctx context.Context, image string) (dockerimage.InspectResponse, []byte, error)
ImagePull(ctx context.Context, ref string, options dockerimage.PullOptions) (io.ReadCloser, error)
ImageRemove(ctx context.Context, image string, options dockerimage.RemoveOptions) ([]dockerimage.DeleteResponse, error)
ServerVersion(ctx context.Context) (dockertypes.Version, error)
}
@@ -146,7 +148,7 @@ type stiDocker struct {
}
// InspectImage returns the image information and its raw representation.
func (d stiDocker) InspectImage(name string) (*dockertypes.ImageInspect, error) {
func (d stiDocker) InspectImage(name string) (*dockerimage.InspectResponse, error) {
ctx, cancel := getDefaultContext()
defer cancel()
resp, _, err := d.client.ImageInspectWithRaw(ctx, name)
@@ -436,7 +438,7 @@ func (d *stiDocker) IsImageInLocalRegistry(name string) (bool, error) {
if resp != nil {
return true, nil
}
if err != nil && !dockerapi.IsErrNotFound(err) {
if err != nil && !errdefs.IsNotFound(err) {
return false, s2ierr.NewInspectImageError(name, err)
}
return false, nil
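The dropped dockerapi.IsErrNotFound helper is replaced by the standalone github.com/containerd/errdefs module, which classifies errors with errors.Is on wrapped sentinels. A minimal sketch of that check, assuming only the errdefs identifiers used in the hunk above:
```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

func main() {
	// IsNotFound matches however deeply the client wraps the sentinel.
	err := fmt.Errorf("inspect image: %w", errdefs.ErrNotFound)
	fmt.Println(errdefs.IsNotFound(err)) // true
}
```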
@@ -550,7 +552,7 @@ func (d *stiDocker) PullImage(name string) (*api.Image, error) {
for retries := 0; retries <= DefaultPullRetryCount; retries++ {
err = util.TimeoutAfter(DefaultDockerTimeout, fmt.Sprintf("pulling image %q", name), func(timer *time.Timer) error {
resp, pullErr := d.client.ImagePull(context.Background(), name, image.PullOptions{RegistryAuth: base64Auth})
resp, pullErr := d.client.ImagePull(context.Background(), name, dockerimage.PullOptions{RegistryAuth: base64Auth})
if pullErr != nil {
return pullErr
}
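For context, PullOptions now lives in the api/types/image package (aliased dockerimage here). A hedged sketch of the call shape, assuming the docker client version vendored by this commit; pullImage and its parameters are illustrative names only:
```go
package docker

import (
	"context"
	"io"

	dockerimage "github.com/docker/docker/api/types/image"
	dockerapi "github.com/docker/docker/client"
)

// pullImage sketches the renamed options type; RegistryAuth carries the
// base64-encoded auth config, as in the hunk above.
func pullImage(ctx context.Context, cli dockerapi.APIClient, ref, base64Auth string) error {
	resp, err := cli.ImagePull(ctx, ref, dockerimage.PullOptions{RegistryAuth: base64Auth})
	if err != nil {
		return err
	}
	defer resp.Close()
	// The pull only completes once the progress stream is drained.
	_, err = io.Copy(io.Discard, resp)
	return err
}
```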
@@ -612,7 +614,7 @@ func (d *stiDocker) PullImage(name string) (*api.Image, error) {
return nil, nil
}
func updateImageWithInspect(image *api.Image, inspect *dockertypes.ImageInspect) {
func updateImageWithInspect(image *api.Image, inspect *dockerimage.InspectResponse) {
image.ID = inspect.ID
if inspect.Config != nil {
image.Config = &api.ContainerConfig{
@@ -859,10 +861,10 @@ func dumpContainerInfo(container dockercontainer.CreateResponse, d *stiDocker, i
// then the container, will block.
func (d *stiDocker) redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Writer, resp io.Reader) error {
if outputStream == nil {
outputStream = ioutil.Discard
outputStream = io.Discard
}
if errorStream == nil {
errorStream = ioutil.Discard
errorStream = io.Discard
}
var err error
if tty {
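Same deprecation story as the other io/ioutil call sites in this commit: io.Discard has been the canonical no-op sink since Go 1.16. A trivial sketch:
```go
package main

import (
	"fmt"
	"io"
)

func main() {
	// Writes to io.Discard succeed and the bytes are thrown away.
	n, err := fmt.Fprintln(io.Discard, "suppressed output")
	fmt.Println(n, err) // 18 <nil>
}
```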
@@ -1142,13 +1144,13 @@ func (d *stiDocker) CommitContainer(opts CommitContainerOptions) (string, error)
func (d *stiDocker) RemoveImage(imageID string) error {
ctx, cancel := getDefaultContext()
defer cancel()
_, err := d.client.ImageRemove(ctx, imageID, image.RemoveOptions{})
_, err := d.client.ImageRemove(ctx, imageID, dockerimage.RemoveOptions{})
return err
}
// BuildImage builds the image according to specified options
func (d *stiDocker) BuildImage(opts BuildImageOptions) error {
dockerOpts := dockertypes.ImageBuildOptions{
dockerOpts := build.ImageBuildOptions{
Tags: []string{opts.Name},
NoCache: true,
SuppressOutput: false,
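ImageBuildOptions and ImageBuildResponse moved from api/types into api/types/build. A minimal sketch of the relocated types, again assuming the vendored client version; buildImage is an illustrative name, not code from this repo:
```go
package docker

import (
	"context"
	"io"

	"github.com/docker/docker/api/types/build"
	dockerapi "github.com/docker/docker/client"
)

func buildImage(ctx context.Context, cli dockerapi.APIClient, buildContext io.Reader, tag string) error {
	resp, err := cli.ImageBuild(ctx, buildContext, build.ImageBuildOptions{
		Tags:    []string{tag},
		NoCache: true,
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Drain the daemon's JSON progress stream so the build can complete.
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}
```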

View File

@@ -3,6 +3,7 @@ package docker
import (
"bytes"
"fmt"
"github.com/docker/docker/api/types/image"
"io/ioutil"
"os"
"path/filepath"
@@ -14,7 +15,8 @@ import (
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/registry"
dockerstrslice "github.com/docker/docker/api/types/strslice"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
containerspec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/openshift/source-to-image/pkg/api/constants"
dockertest "github.com/openshift/source-to-image/pkg/docker/test"
"github.com/openshift/source-to-image/pkg/errors"
@@ -252,7 +254,7 @@ func TestImageBuild(t *testing.T) {
func TestGetScriptsURL(t *testing.T) {
type urltest struct {
image dockertypes.ImageInspect
image image.InspectResponse
result string
calls []string
inspectErr error
@@ -260,14 +262,16 @@ func TestGetScriptsURL(t *testing.T) {
tests := map[string]urltest{
"not present": {
calls: []string{"inspect_image"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{
Env: []string{"Env1=value1"},
Labels: map[string]string{},
},
Config: &dockercontainer.Config{
Env: []string{"Env2=value2"},
Labels: map[string]string{},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Env: []string{"Env2=value2"},
Labels: map[string]string{},
},
},
},
result: "",
@@ -275,24 +279,26 @@ func TestGetScriptsURL(t *testing.T) {
"env in containerConfig": {
calls: []string{"inspect_image"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{
Env: []string{"Env1=value1", constants.ScriptsURLEnvironment + "=test_url_value"},
},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
result: "",
},
"env in image config": {
calls: []string{"inspect_image"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Env: []string{
"Env1=value1",
constants.ScriptsURLEnvironment + "=test_url_value_2",
"Env2=value2",
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Env: []string{
"Env1=value1",
constants.ScriptsURLEnvironment + "=test_url_value_2",
"Env2=value2",
},
},
},
},
@@ -301,21 +307,23 @@ func TestGetScriptsURL(t *testing.T) {
"label in containerConfig": {
calls: []string{"inspect_image"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{
Labels: map[string]string{constants.ScriptsURLLabel: "test_url_value"},
},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
result: "",
},
"label in image config": {
calls: []string{"inspect_image"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Labels: map[string]string{constants.ScriptsURLLabel: "test_url_value_2"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Labels: map[string]string{constants.ScriptsURLLabel: "test_url_value_2"},
},
},
},
result: "test_url_value_2",
@@ -323,7 +331,7 @@ func TestGetScriptsURL(t *testing.T) {
"inspect error": {
calls: []string{"inspect_image", "pull"},
image: dockertypes.ImageInspect{},
image: image.InspectResponse{},
inspectErr: fmt.Errorf("Inspect error"),
},
}
@@ -334,7 +342,7 @@ func TestGetScriptsURL(t *testing.T) {
if tst.inspectErr != nil {
fakeDocker.PullFail = tst.inspectErr
} else {
fakeDocker.Images = map[string]dockertypes.ImageInspect{tst.image.ID: tst.image}
fakeDocker.Images = map[string]image.InspectResponse{tst.image.ID: tst.image}
}
url, err := dh.GetScriptsURL(tst.image.ID)
@@ -354,7 +362,7 @@ func TestGetScriptsURL(t *testing.T) {
func TestRunContainer(t *testing.T) {
type runtest struct {
calls []string
image dockertypes.ImageInspect
image image.InspectResponse
cmd string
externalScripts bool
paramScriptsURL string
@@ -368,9 +376,9 @@ func TestRunContainer(t *testing.T) {
tests := map[string]runtest{
"default": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Assemble,
externalScripts: true,
@@ -378,9 +386,9 @@ func TestRunContainer(t *testing.T) {
},
"runerror": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Assemble,
externalScripts: true,
@@ -399,9 +407,9 @@ func TestRunContainer(t *testing.T) {
},
"paramDestination": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Assemble,
externalScripts: true,
@@ -410,9 +418,9 @@ func TestRunContainer(t *testing.T) {
},
"paramDestination&paramScripts": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Assemble,
externalScripts: true,
@@ -422,10 +430,12 @@ func TestRunContainer(t *testing.T) {
},
"scriptsInsideImageEnvironment": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Env: []string{constants.ScriptsURLEnvironment + "=image:///opt/bin/"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Env: []string{constants.ScriptsURLEnvironment + "=image:///opt/bin/"},
},
},
},
cmd: constants.Assemble,
@@ -434,10 +444,12 @@ func TestRunContainer(t *testing.T) {
},
"scriptsInsideImageLabel": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Labels: map[string]string{constants.ScriptsURLLabel: "image:///opt/bin/"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Labels: map[string]string{constants.ScriptsURLLabel: "image:///opt/bin/"},
},
},
},
cmd: constants.Assemble,
@@ -446,10 +458,12 @@ func TestRunContainer(t *testing.T) {
},
"scriptsInsideImageEnvironmentWithParamDestination": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Env: []string{constants.ScriptsURLEnvironment + "=image:///opt/bin"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Env: []string{constants.ScriptsURLEnvironment + "=image:///opt/bin"},
},
},
},
cmd: constants.Assemble,
@@ -459,10 +473,12 @@ func TestRunContainer(t *testing.T) {
},
"scriptsInsideImageLabelWithParamDestination": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Labels: map[string]string{constants.ScriptsURLLabel: "image:///opt/bin"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Labels: map[string]string{constants.ScriptsURLLabel: "image:///opt/bin"},
},
},
},
cmd: constants.Assemble,
@@ -472,10 +488,12 @@ func TestRunContainer(t *testing.T) {
},
"paramDestinationFromImageEnvironment": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Env: []string{constants.LocationEnvironment + "=/opt", constants.ScriptsURLEnvironment + "=http://my.test.url/test?param=one"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Env: []string{constants.LocationEnvironment + "=/opt", constants.ScriptsURLEnvironment + "=http://my.test.url/test?param=one"},
},
},
},
cmd: constants.Assemble,
@@ -484,10 +502,12 @@ func TestRunContainer(t *testing.T) {
},
"paramDestinationFromImageLabel": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{
Labels: map[string]string{constants.DestinationLabel: "/opt", constants.ScriptsURLLabel: "http://my.test.url/test?param=one"},
Config: &dockerspec.DockerOCIImageConfig{
ImageConfig: containerspec.ImageConfig{
Labels: map[string]string{constants.DestinationLabel: "/opt", constants.ScriptsURLLabel: "http://my.test.url/test?param=one"},
},
},
},
cmd: constants.Assemble,
@@ -496,9 +516,9 @@ func TestRunContainer(t *testing.T) {
},
"usageCommand": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Usage,
externalScripts: true,
@@ -506,9 +526,9 @@ func TestRunContainer(t *testing.T) {
},
"otherCommand": {
calls: []string{"inspect_image", "inspect_image", "inspect_image", "create", "attach", "start", "remove"},
image: dockertypes.ImageInspect{
image: image.InspectResponse{
ContainerConfig: &dockercontainer.Config{},
Config: &dockercontainer.Config{},
Config: &dockerspec.DockerOCIImageConfig{},
},
cmd: constants.Run,
externalScripts: true,
@@ -520,7 +540,7 @@ func TestRunContainer(t *testing.T) {
fakeDocker := dockertest.NewFakeDockerClient()
dh := getDocker(fakeDocker)
tst.image.ID = "test/image:latest"
fakeDocker.Images = map[string]dockertypes.ImageInspect{tst.image.ID: tst.image}
fakeDocker.Images = map[string]image.InspectResponse{tst.image.ID: tst.image}
if len(fakeDocker.Containers) > 0 {
t.Errorf("newly created fake client should have empty container map: %+v", fakeDocker.Containers)
}
@@ -583,8 +603,8 @@ func TestRunContainer(t *testing.T) {
func TestGetImageID(t *testing.T) {
fakeDocker := dockertest.NewFakeDockerClient()
dh := getDocker(fakeDocker)
image := dockertypes.ImageInspect{ID: "test-abcd:latest"}
fakeDocker.Images = map[string]dockertypes.ImageInspect{image.ID: image}
img := image.InspectResponse{ID: "test-abcd:latest"}
fakeDocker.Images = map[string]image.InspectResponse{img.ID: img}
id, err := dh.GetImageID("test-abcd")
expectedCalls := []string{"inspect_image"}
if !reflect.DeepEqual(fakeDocker.Calls, expectedCalls) {
@@ -592,19 +612,19 @@ func TestGetImageID(t *testing.T) {
}
if err != nil {
t.Errorf("Unexpected error returned: %v", err)
} else if id != image.ID {
t.Errorf("Unexpected image id returned: %s", id)
} else if id != img.ID {
t.Errorf("Unexpected img id returned: %s", id)
}
}
func TestRemoveImage(t *testing.T) {
fakeDocker := dockertest.NewFakeDockerClient()
dh := getDocker(fakeDocker)
image := dockertypes.ImageInspect{ID: "test-abcd"}
fakeDocker.Images = map[string]dockertypes.ImageInspect{image.ID: image}
img := image.InspectResponse{ID: "test-abcd"}
fakeDocker.Images = map[string]image.InspectResponse{img.ID: img}
err := dh.RemoveImage("test-abcd")
if err != nil {
t.Errorf("Unexpected error removing image: %s", err)
t.Errorf("Unexpected error removing img: %s", err)
}
}
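The recurring fixture change in this test file is the same everywhere: Env and Labels move from dockercontainer.Config one level down into Config.ImageConfig. A hypothetical helper (not in this commit) that captures the new nesting using only types already imported above:
```go
package docker

import (
	dockercontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
	containerspec "github.com/opencontainers/image-spec/specs-go/v1"
)

// newInspectFixture builds the inspect response shape the updated client
// returns: the OCI image config wraps Env and Labels.
func newInspectFixture(env []string, labels map[string]string) image.InspectResponse {
	return image.InspectResponse{
		ContainerConfig: &dockercontainer.Config{},
		Config: &dockerspec.DockerOCIImageConfig{
			ImageConfig: containerspec.ImageConfig{
				Env:    env,
				Labels: labels,
			},
		},
	}
}
```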

View File

@@ -29,7 +29,7 @@ func CreateLocalGitDirectory() (string, error) {
return "", err
}
err = cr.RunWithOptions(cmd.CommandOpts{Dir: dir, EnvAppend: []string{"GIT_AUTHOR_NAME=test", "GIT_AUTHOR_EMAIL=test@test", "GIT_COMMITTER_NAME=test", "GIT_COMMITTER_EMAIL=test@test"}}, "git", "commit", "-m", "testcommit")
err = cr.RunWithOptions(cmd.CommandOpts{Dir: dir, EnvAppend: []string{"GIT_AUTHOR_NAME=test", "GIT_AUTHOR_EMAIL=test@test", "GIT_COMMITTER_NAME=test", "GIT_COMMITTER_EMAIL=test@test"}}, "git", "commit", "-n", "-m", "testcommit")
if err != nil {
return "", err
}
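This is the "ignore pre-commit hooks" change from the commit message: "-n" is git commit's short form of --no-verify, which skips the pre-commit and commit-msg hooks so locally configured hooks cannot break the fixture setup. A small sketch of the equivalent invocation:
```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// --no-verify ("-n") bypasses pre-commit and commit-msg hooks.
	cmd := exec.Command("git", "commit", "-n", "-m", "testcommit")
	fmt.Println(cmd.String())
}
```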

View File

@@ -94,11 +94,11 @@ func TestParse(t *testing.T) {
},
},
parseTest{
rawurl: "http://[::ffff:1.2.3.4]:443",
rawurl: "http://::ffff:1.2.3.4:443",
expectedGitURL: &URL{
URL: url.URL{
Scheme: "http",
Host: "[::ffff:1.2.3.4]:443",
Host: "::ffff:1.2.3.4:443",
},
Type: URLTypeURL,
},
@@ -165,10 +165,10 @@ func TestParse(t *testing.T) {
},
},
parseTest{
rawurl: "[::ffff:1.2.3.4]:",
rawurl: "::ffff:1.2.3.4:",
expectedGitURL: &URL{
URL: url.URL{
Host: "[::ffff:1.2.3.4]",
Host: "::ffff:1.2.3.4",
},
Type: URLTypeSCP,
},

View File

@@ -1,6 +1,6 @@
FROM registry.access.redhat.com/ubi8/go-toolset:1.23 AS builder
FROM registry.access.redhat.com/ubi10/go-toolset:1.25.3 AS builder
ENV S2I_GIT_VERSION="1.5.0" \
ENV S2I_GIT_VERSION="1.5.2" \
S2I_GIT_MAJOR="1" \
S2I_GIT_MINOR="5"
@@ -23,11 +23,14 @@ LABEL \
name="source-to-image/source-to-image-rhel8" \
description="Source-to-Image is a builder image" \
summary="Source-to-Image is a builder image" \
version="1.6.0" \
version="v1.5.2" \
vendor="Red Hat, Inc." \
com.redhat.component="source-to-image-container" \
cpe="cpe:/a:redhat:source_to_image:1.5::el8" \
maintainer="openshift-builds@redhat.com" \
distribution-scope="public" \
release="v1.5.2" \
url="https://catalog.redhat.com/en/software/container-stacks/detail/5ec54a2e110f56bd24f2ddc7" \
io.k8s.description="Source-to-Image is a builder image" \
io.k8s.display-name="Source-to-Image" \
io.openshift.tags="source-to-image,s2i" \

View File

@@ -1,5 +1,5 @@
FROM registry.access.redhat.com/ubi8/go-toolset:1.23 AS builder
ENV S2I_GIT_VERSION="1.5.0" \
FROM registry.access.redhat.com/ubi10/go-toolset:1.25.3 AS builder
ENV S2I_GIT_VERSION="1.5.2" \
S2I_GIT_MAJOR="1" \
S2I_GIT_MINOR="5"
@@ -22,11 +22,14 @@ LABEL \
name="source-to-image/source-to-image-rhel9" \
description="Source-to-Image is a builder image" \
summary="Source-to-Image is a builder image" \
version="1.6.0" \
version="v1.5.2" \
vendor="Red Hat, Inc." \
com.redhat.component="source-to-image-container" \
cpe="cpe:/a:redhat:source_to_image:1.5::el8" \
maintainer="openshift-builds@redhat.com" \
distribution-scope="public" \
release="v1.5.2" \
url="https://catalog.redhat.com/en/software/container-stacks/detail/5ec54a2e110f56bd24f2ddc7" \
io.k8s.description="Source-to-Image is a builder image" \
io.k8s.display-name="Source-to-Image" \
io.openshift.tags="source-to-image,s2i" \

7
vendor/dario.cat/mergo/FUNDING.json vendored Normal file
View File

@@ -0,0 +1,7 @@
{
"drips": {
"ethereum": {
"ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930"
}
}
}

View File

@@ -85,7 +85,6 @@ Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/depend
* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
* [go-micro/go-micro](https://github.com/go-micro/go-micro)
* [grafana/loki](https://github.com/grafana/loki)
* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
* [masterminds/sprig](github.com/Masterminds/sprig)
* [moby/moby](https://github.com/moby/moby)
* [slackhq/nebula](https://github.com/slackhq/nebula)
@@ -191,10 +190,6 @@ func main() {
}
```
Note: if tests are failing due to a missing package, please execute:
go get gopkg.in/yaml.v3
### Transformers
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`?

View File

@@ -4,8 +4,8 @@
| Version | Supported |
| ------- | ------------------ |
| 0.3.x | :white_check_mark: |
| < 0.3 | :x: |
| 1.x.x | :white_check_mark: |
| < 1.0 | :x: |
## Security contact information

12
vendor/github.com/Microsoft/hcsshim/.clang-format generated vendored Normal file
View File

@@ -0,0 +1,12 @@
Language: Cpp
BasedOnStyle: Microsoft
BreakBeforeBraces: Attach
PointerAlignment: Left
AllowShortFunctionsOnASingleLine: All
# match Go style
IndentCaseLabels: false
# don't break comments over line limit (needed for CodeQL exceptions)
ReflowComments: false
InsertNewlineAtEOF: true
KeepEmptyLines:
AtEndOfFile: true

View File

@@ -1,174 +1,165 @@
version: "2"
run:
timeout: 8m
tests: true
build-tags:
- admin
- functional
- integration
skip-dirs:
# paths are relative to module root
- cri-containerd/test-images
tests: true
linters:
enable:
# defaults:
# - errcheck
# - gosimple
# - govet
# - ineffassign
# - staticcheck
# - typecheck
# - unused
- errorlint # error wrapping (eg, not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`)
- gofmt # whether code was gofmt-ed
- govet # enabled by default, but just to be sure
- nolintlint # ill-formed or insufficient nolint directives
- stylecheck # golint replacement
- thelper # test helpers without t.Helper()
linters-settings:
govet:
enable-all: true
disable:
# struct order is often for Win32 compat
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
- fieldalignment
check-shadowing: true
settings:
govet:
disable:
# struct order is often for Win32 compat
# also, ignore pointer bytes/GC issues for now until performance becomes an issue
- fieldalignment
enable-all: true
stylecheck:
# https://staticcheck.io/docs/checks
checks: ["all"]
staticcheck:
# https://staticcheck.io/docs/checks
checks:
- all
issues:
exclude-rules:
# err is very often shadowed in nested scopes
- linters:
- govet
text: '^shadow: declaration of "err" shadows declaration'
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
# err is very often shadowed in nested scopes
- linters:
- govet
text: '^shadow: declaration of "err" shadows declaration'
# path is relative to module root, which is ./test/
- path: cri-containerd
linters:
- stylecheck
text: "^ST1003: should not use underscores in package names$"
source: "^package cri_containerd$"
# path is relative to module root, which is ./test/
- linters:
- staticcheck
path: cri-containerd
text: "^ST1003: should not use underscores in package names$"
source: ^package cri_containerd$
# don't bother with proper error wrapping in test code
- path: cri-containerd
linters:
- errorlint
text: "non-wrapping format verb for fmt.Errorf"
# don't bother with proper error wrapping in test code
- linters:
- errorlint
path: cri-containerd
text: non-wrapping format verb for fmt.Errorf
# This repo has a LOT of generated schema files, operating system bindings, and other
# things that ST1003 from stylecheck won't like (screaming case Windows api constants for example).
# There are also some structs that we *could* change the initialisms to be Go friendly
# (Id -> ID) but they're exported and it would be a breaking change.
# This makes it so that most new code, code that isn't supposed to be a pretty faithful
# mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
- path: layer.go
linters:
- stylecheck
Text: "ST1003:"
# static check doesn't recognize `t.Fatal[f]?` when checking for potential `nil` pointer dereference
- linters:
- staticcheck
path: ".*_test.go$"
text: "^SA5011"
- path: hcsshim.go
linters:
- stylecheck
Text: "ST1003:"
# This repo has a LOT of generated schema files, operating system bindings, and other
# things that ST1003 from stylecheck won't like (screaming case Windows api constants for example).
# There are also some structs that we *could* change the initialisms to be Go friendly
# (Id -> ID) but they're exported and it would be a breaking change.
# This makes it so that most new code, code that isn't supposed to be a pretty faithful
# mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
- linters:
- staticcheck
path: layer.go
text: "ST1003:"
- linters:
- staticcheck
path: hcsshim.go
text: "ST1003:"
- linters:
- staticcheck
path: cmd/ncproxy/nodenetsvc/
text: "ST1003:"
- linters:
- staticcheck
path: cmd/ncproxy_mock/
text: "ST1003:"
- linters:
- staticcheck
path: internal/hcs/schema2/
- linters:
- staticcheck
path: internal/wclayer/
text: "ST1003:"
- linters:
- staticcheck
path: hcn/
text: "ST1003:"
- linters:
- staticcheck
path: internal/hcs/schema1/
text: "ST1003:"
- linters:
- staticcheck
path: internal/hns/
text: "ST1003:"
- linters:
- staticcheck
path: ext4/internal/compactext4/
text: "ST1003:"
- linters:
- staticcheck
path: ext4/internal/format/
text: "ST1003:"
- linters:
- staticcheck
path: internal/guestrequest/
text: "ST1003:"
- linters:
- staticcheck
path: internal/guest/prot/
text: "ST1003:"
- linters:
- staticcheck
path: internal/windevice/
text: "ST1003:"
- linters:
- staticcheck
path: internal/winapi/
text: "ST1003:"
- linters:
- staticcheck
path: internal/vmcompute/
text: "ST1003:"
- linters:
- staticcheck
path: internal/regstate/
text: "ST1003:"
- linters:
- staticcheck
path: internal/hcserror/
text: "ST1003:"
- path: cmd\\ncproxy\\nodenetsvc\\
linters:
- stylecheck
Text: "ST1003:"
# v0 APIs are deprecated, but still retained for backwards compatibility
- linters:
- staticcheck
path: cmd/ncproxy/
text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
- linters:
- staticcheck
path: internal/tools/networkagent
text: "^SA1019: .*nodenetsvc[/]?v0"
- linters:
- staticcheck
path: internal/vhdx/info
text: "ST1003:"
- path: cmd\\ncproxy_mock\\
linters:
- stylecheck
Text: "ST1003:"
paths:
# paths are relative to module root
- cri-containerd/test-images
- path: internal\\hcs\\schema2\\
linters:
- stylecheck
- gofmt
- path: internal\\wclayer\\
linters:
- stylecheck
Text: "ST1003:"
- path: hcn\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcs\\schema1\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hns\\
linters:
- stylecheck
Text: "ST1003:"
- path: ext4\\internal\\compactext4\\
linters:
- stylecheck
Text: "ST1003:"
- path: ext4\\internal\\format\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\guestrequest\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\guest\\prot\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\windevice\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\winapi\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\vmcompute\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\regstate\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcserror\\
linters:
- stylecheck
Text: "ST1003:"
# v0 APIs are deprecated, but still retained for backwards compatibility
- path: cmd\\ncproxy\\
linters:
- staticcheck
text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
- path: internal\\tools\\networkagent
linters:
- staticcheck
text: "^SA1019: .*nodenetsvc[/]?v0"
- path: internal\\vhdx\\info
linters:
- stylecheck
Text: "ST1003:"
formatters:
enable:
- gofmt
exclusions:
generated: lax
paths:
- cri-containerd/test-images
- internal/hcs/schema2/

View File

@@ -1,13 +1,20 @@
BASE:=base.tar.gz
DEV_BUILD:=0
include Makefile.bootfiles
GO:=go
GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
CGO_ENABLED:=0
GOMODVENDOR:=
KMOD:=0
CFLAGS:=-O2 -Wall
LDFLAGS:=-static -s # strip C binaries
LDFLAGS:=-static -s #strip C binaries
LDLIBS:=
PREPROCESSORFLAGS:=
ifeq "$(KMOD)" "1"
LDFLAGS:= -s
LDLIBS:= -lkmod
PREPROCESSORFLAGS:=-DMODULES=1
endif
GO_FLAGS_EXTRA:=
ifeq "$(GOMODVENDOR)" "1"
@@ -23,108 +30,14 @@ SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
# additional directories to search for rule prerequisites and targets
VPATH=$(SRCROOT)
DELTA_TARGET=out/delta.tar.gz
ifeq "$(DEV_BUILD)" "1"
DELTA_TARGET=out/delta-dev.tar.gz
endif
ifeq "$(SNP_BUILD)" "1"
DELTA_TARGET=out/delta-snp.tar.gz
endif
# The link aliases for gcstools
GCS_TOOLS=\
generichook \
install-drivers
# Common path prefix.
PATH_PREFIX:=
# These have PATH_PREFIX prepended to obtain the full path in recipes, e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
VMGS_TOOL:=
IGVM_TOOL:=
KERNEL_PATH:=
.PHONY: all always rootfs test snp simple
.DEFAULT_GOAL := all
all: out/initrd.img out/rootfs.tar.gz
clean:
find -name '*.o' -print0 | xargs -0 -r rm
rm -rf bin deps rootfs out
test:
cd $(SRCROOT) && $(GO) test -v ./internal/guest/...
rootfs: out/rootfs.vhd
snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
simple: out/simple.vmgs snp
%.vmgs: %.bin
rm -f $@
# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
rm -f $@
python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
ROOTFS_DEVICE:=/dev/sda
VERITY_DEVICE:=/dev/sdb
# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
rm -f $@
python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
rm -f $@
python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
%.vhd: % bin/cmd/tar2ext4
./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
%.vhd: %.ext4 bin/cmd/tar2ext4
./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
# Retrieve info required by dm-verity at boot time
# Get the blocksize of rootfs
cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
out/rootfs.hash.salt:
hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
gzip -f -d ./out/rootfs.tar.gz
./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@
out/rootfs.tar.gz: out/initrd.img
rm -rf rootfs-conv
mkdir rootfs-conv
gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
tar -zcf $@ -C rootfs-conv .
rm -rf rootfs-conv
out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
gzip -c out/initrd.img.uncompressed > $@
rm out/initrd.img.uncompressed
# This target includes utilities which may be useful for testing purposes.
out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
rm -rf rootfs-dev
@@ -168,10 +81,7 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
tar -zcf $@ -C rootfs .
rm -rf rootfs
out/containerd-shim-runhcs-v1.exe:
GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
@mkdir -p $(dir $@)
GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)
@@ -181,8 +91,8 @@ bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
bin/init: init/init.o vsockexec/vsock.o
@mkdir -p bin
$(CC) $(LDFLAGS) -o $@ $^
$(CC) $(LDFLAGS) -o $@ $^ $(LDLIBS)
%.o: %.c
@mkdir -p $(dir $@)
$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
$(CC) $(PREPROCESSORFLAGS) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<

197
vendor/github.com/Microsoft/hcsshim/Makefile.bootfiles generated vendored Normal file
View File

@@ -0,0 +1,197 @@
BASE:=base.tar.gz
DEV_BUILD:=0
DELTA_TARGET=out/delta.tar.gz
ifeq "$(DEV_BUILD)" "1"
DELTA_TARGET=out/delta-dev.tar.gz
endif
ifeq "$(SNP_BUILD)" "1"
DELTA_TARGET=out/delta-snp.tar.gz
endif
SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
PATH_PREFIX:=
# These have PATH_PREFIX prepended to obtain the full path in recipes, e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
VMGS_TOOL:=
IGVM_TOOL:=
KERNEL_PATH:=
TAR2EXT4_TOOL:=bin/cmd/tar2ext4
ROOTFS_DEVICE:=/dev/sda
HASH_DEVICE:=/dev/sdb
.PHONY: all always rootfs test snp simple
.DEFAULT_GOAL := all
all: out/initrd.img out/rootfs.tar.gz
clean:
find -name '*.o' -print0 | xargs -0 -r rm
rm -rf bin rootfs out
rootfs: out/rootfs.vhd
snp: out/kernel.vmgs out/rootfs-verity.vhd out/v2056.vmgs out/v2056combined.vmgs
simple: out/simple.vmgs snp
%.vmgs: %.bin
rm -f $@
# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
rm -f $@
python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
-o $@ \
-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" \
-rdinit out/initrd.img \
-vtl 0
# The boot performance is optimized by supplying rootfs as a SCSI attachment. In this case the kernel boots with
# dm-verity to ensure the integrity. Similar to layer VHDs the verity Merkle tree is appended to ext4 filesystem.
# It transpires that the /dev/sd* order is not deterministic wrt the scsi device order. Thus build a single userland
# fs + merkle tree device and boot that.
#
# From https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-init.html
#
# dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
#
# where:
# <name> ::= The device name.
# <uuid> ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
# <minor> ::= The device minor number | ""
# <flags> ::= "ro" | "rw"
# <table> ::= <start_sector> <num_sectors> <target_type> <target_args>
# <target_type> ::= "verity" | "linear" | ... (see list below)
#
# From https://docs.kernel.org/admin-guide/device-mapper/verity.html
# <version> <dev> <hash_dev>
# <data_block_size> <hash_block_size>
# <num_data_blocks> <hash_start_block>
# <algorithm> <digest> <salt>
# [<#opt_params> <opt_params>]
#
# typical igvm tool line once all the macros are expanded
# python3 /home/user/igvmfile.py -o out/v2056.bin -kernel /home/user/bzImage -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
#
# so a kernel command line of:
# 8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption\" init=/startup_v2056.sh
#
# and a dm-mod.create of:
# dmverity,,,ro,0 196744 verity 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
#
# which breaks down to:
#
# name = "dmverity"
# uuid = ""
# minor = ""
# flags = "ro"
# table = 0 196744 verity "args"
# start_sector = 0
# num_sectors = 196744
# target_type = verity
# target_args = 1 /dev/sda /dev/sdb 4096 4096 24593 0 sha256 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66 b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba 1 ignore_corruption
# args:
# version 1
# dev /dev/sda
# hash_dev /dev/sdb
# data_block_size 4096
# hash_block_size 4096
# num_data_blocks 24593
# hash_start_block 0
# algorithm sha256
# digest 6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66
# salt b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba
# opt_params
# count = 1
# ignore_corruption
#
# combined typical (note the bigger count of sectors for the whole device)
# dmverity,,,ro,0 199672 verity 1 /dev/sda /dev/sda 4096 4096 24959 24959 sha256 4aa6e79866ee946ddbd9cddd6554bc6449272942fcc65934326817785a3bd374 adc4956274489c936395bab046a2d476f21ef436e571ba53da2fdf3aee59bf0a
#
# A few notes:
# - num_sectors is the size of the final (aka target) verity device, i.e. the size of our rootfs excluding the Merkle
# tree.
# - We don't add a verity superblock, so the <hash_start_block> will be exactly at the end of the ext4 filesystem and
#   equal to its size. When a verity superblock is present, an extra block should be added to the offset value,
#   i.e. 24959 becomes 24960.
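To make the arithmetic above concrete, here is a minimal Go sketch (a hypothetical helper, not part of this Makefile) that derives num_sectors and assembles the dm-mod.create value; the device paths, digest and salt come from the worked example above.

package main

import "fmt"

// dmVerityCreateArg mirrors the Makefile's shell arithmetic: num_sectors is
// datablocks * datablocksize / 512, and hashStart is 0 for a separate hash
// device or equal to datablocks when the Merkle tree is appended to the same
// device (no verity superblock). Optional parameters such as
// "1 ignore_corruption" are left off for brevity.
func dmVerityCreateArg(rootDev, hashDev string, blocks, blockSize, hashStart uint64, digest, salt string) string {
	sectors := blocks * blockSize / 512
	return fmt.Sprintf("dmverity,,,ro,0 %d verity 1 %s %s %d %d %d %d sha256 %s %s",
		sectors, rootDev, hashDev, blockSize, blockSize, blocks, hashStart, digest, salt)
}

func main() {
	// reproduces the separate-device example: 24593 * 4096 / 512 = 196744 sectors
	fmt.Println(dmVerityCreateArg("/dev/sda", "/dev/sdb", 24593, 4096, 0,
		"6d625a306aafdf73125a84388b7bfdd2c3a154bd8d698955f4adffc736bdfd66",
		"b9065c23231f0d8901cc3a68e1d3b8d624213e76d6f9f6d3ccbcb829f9c710ba"))
}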
# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
# Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
rm -f $@
python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
-o $@ \
-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(HASH_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \
-vtl 0
out/v2056combined.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
rm -f $@
echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\"
python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
-o $@ \
-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=9 ignore_loglevel dev.scsi.logging_level=9411 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" \
-vtl 0
# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
out/kernel.bin: out/rootfs-verity.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup.sh
rm -f $@
echo root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\"
python3 $(PATH_PREFIX)/$(IGVM_TOOL) \
-o $@ \
-kernel $(PATH_PREFIX)/$(KERNEL_PATH) \
-append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(ROOTFS_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) $(shell cat out/rootfs.hash.datablocks) sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" \
-vtl 0
# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
%.vhd: % $(TAR2EXT4_TOOL)
$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
%.vhd: %.ext4 $(TAR2EXT4_TOOL)
$(TAR2EXT4_TOOL) -only-vhd -i $< -o $@
%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %.hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
# Retrieve the info required by dm-verity at boot time (root digest, salt and block geometry) from the veritysetup output
cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
out/rootfs.hash.salt:
hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
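The salt rule above draws 32 random bytes and hex-encodes them; an equivalent sketch in Go (note that hexdump's %08X groups bytes into 32-bit words, so the digit order differs from a plain byte-wise dump of the same input):

package main

import (
	"crypto/rand"
	"fmt"
)

func main() {
	salt := make([]byte, 32) // 32 bytes of entropy, as in the hexdump rule
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	fmt.Printf("%X\n", salt) // upper-case hex, 64 digits
}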
out/rootfs.ext4: out/rootfs.tar.gz $(TAR2EXT4_TOOL)
gzip -f -d ./out/rootfs.tar.gz
$(TAR2EXT4_TOOL) -i ./out/rootfs.tar -o $@
out/rootfs-verity.ext4: out/rootfs.ext4 out/rootfs.hash
cp out/rootfs.ext4 $@
cat out/rootfs.hash >> $@
out/rootfs.tar.gz: out/initrd.img
rm -rf rootfs-conv
mkdir rootfs-conv
gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
tar -zcf $@ -C rootfs-conv .
rm -rf rootfs-conv
out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh
$(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed
gzip -c out/initrd.img.uncompressed > $@
rm out/initrd.img.uncompressed


@@ -44,7 +44,7 @@ delta.tar.gz initrd.img rootfs.tar.gz
### Containerd Shim
For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md).
Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.


@@ -63,10 +63,10 @@ func (process *Process) SystemID() string {
}
func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
switch err { //nolint:errorlint
case nil:
if err == nil {
return true, nil
case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
}
if errors.Is(err, ErrVmcomputeOperationInvalidState) || errors.Is(err, ErrComputeSystemDoesNotExist) || errors.Is(err, ErrElementNotFound) {
if !process.stopped() {
// The process should be gone, but we have not received the notification.
// After a second, force unblock the process wait to work around a possible
@@ -82,9 +82,8 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo
}()
}
return false, nil
default:
return false, err
}
return false, nil
}
// Signal signals the process with `options`.


@@ -24,4 +24,6 @@ type Chipset struct {
// LinuxKernelDirect - Added in v2.2 Builds >=181117
LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
FirmwareFile *FirmwareFile `json:"FirmwareFile,omitempty"`
}


@@ -9,14 +9,6 @@
package hcsschema
const (
CimMountFlagNone uint32 = 0x0
CimMountFlagChildOnly uint32 = 0x1
CimMountFlagEnableDax uint32 = 0x2
CimMountFlagCacheFiles uint32 = 0x4
CimMountFlagCacheRegions uint32 = 0x8
)
type CimMount struct {
ImagePath string `json:"ImagePath,omitempty"`
FileSystemName string `json:"FileSystemName,omitempty"`


@@ -0,0 +1,8 @@
package hcsschema
type FirmwareFile struct {
// Parameters is an experimental/pre-release field. The field itself or its
// behavior can change in future iterations of the schema. Avoid taking a hard
// dependency on this field.
Parameters []byte `json:"Parameters,omitempty"`
}


@@ -1,49 +0,0 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Memory2 struct {
SizeInMB uint64 `json:"SizeInMB,omitempty"`
AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
EnableHotHint bool `json:"EnableHotHint,omitempty"`
EnableColdHint bool `json:"EnableColdHint,omitempty"`
EnableEpf bool `json:"EnableEpf,omitempty"`
// EnableDeferredCommit is private in the schema. If regenerated need to add back.
EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
// EnableColdDiscardHint if enabled, then the memory cold discard hint feature is exposed
// to the VM, allowing it to trim non-zeroed pages from the working set (if supported by
// the guest operating system).
EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
// LowMmioGapInMB is the low MMIO region allocated below 4GB.
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
// HighMmioBaseInMB is the high MMIO region allocated above 4GB (base and
// size).
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
// HighMmioGapInMB is the high MMIO region.
//
// TODO: This is pre-release support in schema 2.3. Need to add build number
// docs when a public build with this is out.
HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
}


@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type MemoryBackingType string
// List of MemoryBackingType
const (
MemoryBackingType_PHYSICAL MemoryBackingType = "Physical"
MemoryBackingType_VIRTUAL MemoryBackingType = "Virtual"
MemoryBackingType_HYBRID MemoryBackingType = "Hybrid"
)


@@ -0,0 +1,19 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Numa struct {
VirtualNodeCount uint8 `json:"VirtualNodeCount,omitempty"`
PreferredPhysicalNodes []int64 `json:"PreferredPhysicalNodes,omitempty"`
Settings []NumaSetting `json:"Settings,omitempty"`
MaxSizePerNode uint64 `json:"MaxSizePerNode,omitempty"`
}


@@ -0,0 +1,17 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NumaNode struct {
VirtualNodeIndex uint32 `json:"VirtualNodeIndex,omitempty"`
PhysicalNodeIndex uint32 `json:"PhysicalNodeIndex,omitempty"`
}


@@ -0,0 +1,19 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NumaNodeMemory struct {
// Total physical memory on this physical NUMA node that is consumable by the VMs.
TotalConsumableMemoryInPages uint64 `json:"TotalConsumableMemoryInPages,omitempty"`
// Currently available physical memory on this physical NUMA node for the VMs.
AvailableMemoryInPages uint64 `json:"AvailableMemoryInPages,omitempty"`
}


@@ -0,0 +1,17 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NumaNodeProcessor struct {
TotalAssignedProcessors uint32 `json:"TotalAssignedProcessors,omitempty"`
TotalAvailableProcessors uint32 `json:"TotalAvailableProcessors,omitempty"`
}


@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NumaProcessors struct {
CountPerNode Range `json:"count_per_node,omitempty"`
NodePerSocket uint32 `json:"node_per_socket,omitempty"`
}
type Range struct {
Max uint32 `json:"max,omitempty"`
}


@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NumaSetting struct {
VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"`
PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"`
VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"`
CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"`
CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"`
MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"`
}


@@ -1,23 +0,0 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.5
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Processor2 struct {
Count int32 `json:"Count,omitempty"`
Limit int32 `json:"Limit,omitempty"`
Weight int32 `json:"Weight,omitempty"`
ExposeVirtualizationExtensions bool `json:"ExposeVirtualizationExtensions,omitempty"`
// An optional object that configures the CPU Group to which a Virtual Machine is going to bind to.
CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
}


@@ -26,6 +26,8 @@ type Properties struct {
RuntimeId string `json:"RuntimeId,omitempty"`
SystemGUID string `json:"SystemGUID,omitempty"`
RuntimeTemplateId string `json:"RuntimeTemplateId,omitempty"`
State string `json:"State,omitempty"`


@@ -23,4 +23,5 @@ const (
PTICHeartbeatStatus PropertyType = "ICHeartbeatStatus"
PTProcessorTopology PropertyType = "ProcessorTopology"
PTCPUGroup PropertyType = "CpuGroup"
PTSystemGUID PropertyType = "SystemGUID"
)


@@ -1,16 +1,18 @@
// Autogenerated code; DO NOT EDIT.
/*
* HCS API
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Topology struct {
Memory *Memory2 `json:"Memory,omitempty"`
Processor *Processor2 `json:"Processor,omitempty"`
Memory *VirtualMachineMemory `json:"Memory,omitempty"`
Processor *VirtualMachineProcessor `json:"Processor,omitempty"`
Numa *Numa `json:"Numa,omitempty"`
}


@@ -1,36 +1,29 @@
// Autogenerated code; DO NOT EDIT.
/*
* HCS API
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Configuration of a virtual machine, used during its creation to set up and/or use resources.
type VirtualMachine struct {
// StopOnReset is private in the schema. If regenerated need to put back.
StopOnReset bool `json:"StopOnReset,omitempty"`
Chipset *Chipset `json:"Chipset,omitempty"`
ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
Devices *Devices `json:"Devices,omitempty"`
GuestState *GuestState `json:"GuestState,omitempty"`
RestoreState *RestoreState `json:"RestoreState,omitempty"`
Version *Version `json:"Version,omitempty"`
// When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state.
StopOnReset bool `json:"StopOnReset,omitempty"`
Chipset *Chipset `json:"Chipset,omitempty"`
ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
Devices *Devices `json:"Devices,omitempty"`
GuestState *GuestState `json:"GuestState,omitempty"`
RestoreState *RestoreState `json:"RestoreState,omitempty"`
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
}


@@ -0,0 +1,33 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type VirtualMachineMemory struct {
SizeInMB uint64 `json:"SizeInMB,omitempty"`
Backing *MemoryBackingType `json:"Backing,omitempty"`
// If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory.
AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
// If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set. (if supported by the guest operating system).
EnableHotHint bool `json:"EnableHotHint,omitempty"`
// If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system).
EnableColdHint bool `json:"EnableColdHint,omitempty"`
// If enabled, then the memory cold discard hint feature is exposed to the VM, allowing it to trim non-zeroed pages from the working set (if supported by the guest operating system).
EnableColdDiscardHint bool `json:"EnableColdDiscardHint,omitempty"`
// If enabled, then commit is not charged for each backing page until first access.
EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
// Low MMIO region allocated below 4GB
LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
// High MMIO region allocated above 4GB (base and size)
HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
SlitType *VirtualSlitType `json:"SlitType,omitempty"`
}


@@ -0,0 +1,21 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type VirtualMachineProcessor struct {
Count uint32 `json:"Count,omitempty"`
Limit uint64 `json:"Limit,omitempty"`
Weight uint64 `json:"Weight,omitempty"`
Reservation uint64 `json:"Reservation,omitempty"`
CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
}


@@ -9,8 +9,9 @@
package hcsschema
// TODO: This is pre-release support in schema 2.3. Need to add build number
// TODO: PropagateNumaAffinity is pre-release/experimental field in schema 2.11. Need to add build number
// docs when a public build with this is out.
type VirtualPciDevice struct {
Functions []VirtualPciFunction `json:",omitempty"`
PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
}


@@ -0,0 +1,23 @@
// Autogenerated code; DO NOT EDIT.
/*
* Schema Open API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// VirtualSlitType : Indicates if a virtual SLIT should be enabled for a VM and the type of virtual SLIT to be enabled.
type VirtualSlitType string
// List of VirtualSlitType
const (
VirtualSlitType_NONE VirtualSlitType = "None"
VirtualSlitType_FIRMWARE VirtualSlitType = "Firmware"
VirtualSlitType_MEASURED VirtualSlitType = "Measured"
VirtualSlitType_FIRMWARE_FALLBACK_MEASURED VirtualSlitType = "FirmwareFallbackMeasured"
)


@@ -13,4 +13,6 @@ type WindowsCrashReporting struct {
DumpFileName string `json:"DumpFileName,omitempty"`
MaxDumpSize int64 `json:"MaxDumpSize,omitempty"`
DumpType string `json:"DumpType,omitempty"`
}


@@ -238,9 +238,10 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {
resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err { //nolint:errorlint
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
if err != nil &&
!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
!errors.Is(err, ErrComputeSystemDoesNotExist) &&
!errors.Is(err, ErrVmcomputeOperationPending) {
return makeSystemError(computeSystem, operation, err, events)
}
return nil
@@ -259,9 +260,10 @@ func (computeSystem *System) Terminate(ctx context.Context) error {
resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
events := processHcsResult(ctx, resultJSON)
switch err { //nolint:errorlint
case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
default:
if err != nil &&
!errors.Is(err, ErrVmcomputeAlreadyStopped) &&
!errors.Is(err, ErrComputeSystemDoesNotExist) &&
!errors.Is(err, ErrVmcomputeOperationPending) {
return makeSystemError(computeSystem, operation, err, events)
}
return nil
@@ -279,14 +281,13 @@ func (computeSystem *System) waitBackground() {
span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))
err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
switch err { //nolint:errorlint
case nil:
if err == nil {
log.G(ctx).Debug("system exited")
case ErrVmcomputeUnexpectedExit:
} else if errors.Is(err, ErrVmcomputeUnexpectedExit) {
log.G(ctx).Debug("unexpected system exit")
computeSystem.exitError = makeSystemError(computeSystem, operation, err, nil)
err = nil
default:
} else {
err = makeSystemError(computeSystem, operation, err, nil)
}
computeSystem.closedWaitOnce.Do(func() {


@@ -47,7 +47,7 @@ func (nnvManagementMacList *HNSNnvManagementMacList) Set() (*HNSNnvManagementMac
func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
operation := "Get"
title := "hcsshim::nnvManagementMacList::" + operation
logrus.Debugf(title)
logrus.Debug(title)
return HNSNnvManagementMacRequest("GET", "", "")
}
@@ -55,6 +55,6 @@ func GetNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
func DeleteNnvManagementMacAddressList() (*HNSNnvManagementMacList, error) {
operation := "Delete"
title := "hcsshim::nnvManagementMacList::" + operation
logrus.Debugf(title)
logrus.Debug(title)
return HNSNnvManagementMacRequest("DELETE", "", "")
}


@@ -22,9 +22,8 @@ import (
// of the job and a mutex for synchronized handle access.
type JobObject struct {
handle windows.Handle
// All accesses to this MUST be done atomically except in `Open` as the object
// is being created in the function. 1 signifies that this job is currently a silo.
silo uint32
// silo signifies that this job is currently a silo.
silo atomic.Bool
mq *queue.MessageQueue
handleLock sync.RWMutex
}
@@ -204,9 +203,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
handle: jobHandle,
}
if isJobSilo(jobHandle) {
job.silo = 1
}
job.silo.Store(isJobSilo(jobHandle))
// If the IOCP we'll be using to receive messages for all jobs hasn't been
// created, create it and start polling.
@@ -479,7 +476,7 @@ func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error
return ErrAlreadyClosed
}
if !job.isSilo() {
if !job.silo.Load() {
return ErrNotSilo
}
@@ -546,7 +543,7 @@ func (job *JobObject) PromoteToSilo() error {
return ErrAlreadyClosed
}
if job.isSilo() {
if job.silo.Load() {
return nil
}
@@ -569,15 +566,10 @@ func (job *JobObject) PromoteToSilo() error {
return fmt.Errorf("failed to promote job to silo: %w", err)
}
atomic.StoreUint32(&job.silo, 1)
job.silo.Store(true)
return nil
}
// isSilo returns if the job object is a silo.
func (job *JobObject) isSilo() bool {
return atomic.LoadUint32(&job.silo) == 1
}
// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
// private working set for every process running in the job.
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {


@@ -150,6 +150,7 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr)
}
// CodeQL [SM03681] checked against max value above (there is no math.MaxUintPtr ...)
info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
return job.setExtendedInformation(info)
}


@@ -4,7 +4,6 @@ import (
"context"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
type entryContextKeyType int
@@ -20,13 +19,13 @@ var (
// Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`.
L = logrus.NewEntry(logrus.StandardLogger())
// G is an alias for GetEntry
// G is an alias for GetEntry.
G = GetEntry
// S is an alias for SetEntry
// S is an alias for SetEntry.
S = SetEntry
// U is an alias for UpdateContext
// U is an alias for UpdateContext.
U = UpdateContext
)
@@ -83,7 +82,7 @@ func UpdateContext(ctx context.Context) context.Context {
// WithContext returns a context that contains the provided log entry.
// The entry can be extracted with `GetEntry` (`G`)
//
// The entry in the context is a copy of `entry` (generated by `entry.WithContext`)
// The entry in the context is a copy of `entry` (generated by `entry.WithContext`).
func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) {
// regardless of the order, entry.Context != GetEntry(ctx)
// here, the returned entry will reference the supplied context
@@ -93,25 +92,6 @@ func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *lo
return ctx, entry
}
// Copy extracts the tracing Span and logging entry from the src Context, if they
// exist, and adds them to the dst Context.
//
// This is useful to share tracing and logging between contexts, but not the
// cancellation. For example, if the src Context has been cancelled but cleanup
// operations triggered by the cancellation require a non-cancelled context to
// execute.
func Copy(dst context.Context, src context.Context) context.Context {
if s := trace.FromContext(src); s != nil {
dst = trace.NewContext(dst, s)
}
if e := fromContext(src); e != nil {
dst, _ = WithContext(dst, e)
}
return dst
}
func fromContext(ctx context.Context) *logrus.Entry {
e, _ := ctx.Value(_entryContextKey).(*logrus.Entry)
return e


@@ -103,9 +103,7 @@ func encode(v interface{}) (_ []byte, err error) {
if jErr := enc.Encode(v); jErr != nil {
if err != nil {
// TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
//nolint:errorlint // non-wrapping format verb for fmt.Errorf
return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
return nil, fmt.Errorf("protojson encoding: %w; json encoding: %w", err, jErr)
}
return nil, fmt.Errorf("json encoding: %w", jErr)
}


@@ -82,7 +82,7 @@ func (h *Hook) encode(e *logrus.Entry) {
formatTime := h.TimeFormat != ""
formatDuration := h.DurationFormat != nil
if !(h.EncodeAsJSON || formatTime || formatDuration) {
if !h.EncodeAsJSON && !formatTime && !formatDuration {
return
}


@@ -22,23 +22,14 @@ var (
// case sensitive keywords, so "env" is not a substring on "Environment"
_scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")}
_scrub int32
_scrub atomic.Bool
)
// SetScrubbing enables scrubbing
func SetScrubbing(enable bool) {
v := int32(0) // cant convert from bool to int32 directly
if enable {
v = 1
}
atomic.StoreInt32(&_scrub, v)
}
func SetScrubbing(enable bool) { _scrub.Store(enable) }
// IsScrubbingEnabled checks if scrubbing is enabled
func IsScrubbingEnabled() bool {
v := atomic.LoadInt32(&_scrub)
return v != 0
}
func IsScrubbingEnabled() bool { return _scrub.Load() }
// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of
// type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters)


@@ -68,15 +68,15 @@ func (le *LogrusExporter) ExportSpan(s *trace.SpanData) {
}
level := logrus.InfoLevel
if s.Status.Code != 0 {
if s.Code != 0 {
level = logrus.ErrorLevel
// don't overwrite an existing "error" or "errorCode" attributes
if _, ok := data[logrus.ErrorKey]; !ok {
data[logrus.ErrorKey] = s.Status.Message
data[logrus.ErrorKey] = s.Message
}
if _, ok := data[_errorCodeKey]; !ok {
data[_errorCodeKey] = codes.Code(s.Status.Code).String()
data[_errorCodeKey] = codes.Code(s.Code).String()
}
}


@@ -31,13 +31,13 @@ type ModificationRequest struct {
}
type NetworkModifyRequest struct {
AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck
AdapterId string `json:"AdapterId,omitempty"` //nolint:staticcheck // ST1003: ALL_CAPS
RequestType RequestType `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
type RS4NetworkModifyRequest struct {
AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck
AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:staticcheck // ST1003: ALL_CAPS
RequestType RequestType `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}


@@ -18,6 +18,8 @@ import (
winio "github.com/Microsoft/go-winio"
)
// TODO(go1.24): use [os.Root] and co. here
func OpenRoot(path string) (*os.File, error) {
longpath, err := longpath.LongAbs(path)
if err != nil {


@@ -63,8 +63,7 @@ const (
shareModeRead shareMode = 0x1
shareModeWrite shareMode = 0x2
//nolint:stylecheck // ST1003
sidVmGroup = "S-1-5-83-0"
sidVMGroup = "S-1-5-83-0"
trusteeFormIsSid trusteeForm = 0
@@ -75,13 +74,13 @@ const (
// include Grant ACE entries for the VM Group SID. This is a golang re-
// implementation of the same function in vmcompute, just not exported in
// RS5. Which kind of sucks. Sucks a lot :/
func GrantVmGroupAccess(name string) error { //nolint:stylecheck // ST1003
func GrantVmGroupAccess(name string) error { //nolint:staticcheck // ST1003: ALL_CAPS
return GrantVmGroupAccessWithMask(name, accessMaskDesiredPermission)
}
// GrantVmGroupAccessWithMask sets the desired DACL for a specified file or
// directory.
func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:stylecheck // ST1003
func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:staticcheck // ST1003: ALL_CAPS
if access == 0 || access<<4 != 0 {
return fmt.Errorf("invalid access mask: 0x%08x", access)
}
@@ -154,9 +153,9 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
// The caller is responsible for LocalFree of the returned DACL on success.
func generateDACLWithAcesAdded(name string, isDir bool, desiredAccess accessMask, origDACL uintptr) (uintptr, error) {
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVmGroup)
sid, err := syscall.StringToSid(sidVMGroup)
if err != nil {
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVMGroup, err)
}
inheritance := inheritModeNoInheritance


@@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error
}()
select {
case <-ctx.Done():
if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint
if ctx.Err() == gcontext.DeadlineExceeded {
log.G(ctx).WithField(logfields.Timeout, trueTimeout).
Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " +
"If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " +


@@ -70,7 +70,7 @@ func (r *legacyLayerWriterWrapper) Close() (err error) {
defer r.s.End()
defer func() { oc.SetSpanStatus(r.s, err) }()
defer os.RemoveAll(r.root.Name())
defer r.legacyLayerWriter.CloseRoots()
defer r.CloseRoots()
err = r.legacyLayerWriter.Close()
if err != nil {


@@ -34,6 +34,7 @@ const (
UtilityVMPath = `UtilityVM`
UtilityVMFilesPath = `UtilityVM\Files`
RegFilesPath = `Files\Windows\System32\config`
BootDirRelativePath = `\EFI\Microsoft\Boot`
BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD`
BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi`
ContainerBaseVhd = `blank-base.vhdx`


@@ -32,10 +32,16 @@ type CimFsFileMetadata struct {
EACount uint32
}
type CimFsImagePath struct {
ImageDir *uint16
ImageName *uint16
}
//sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage?
//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage?
//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
//sys CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage2?
//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage?
//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?
@@ -45,3 +51,11 @@ type CimFsFileMetadata struct {
//sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath?
//sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink?
//sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream?
//sys CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimAddFsToMergedImage?
//sys CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) = cimfs.CimAddFsToMergedImage2?
//sys CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) = cimfs.CimMergeMountImage?
//sys CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimTombstoneFile?
//sys CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateMergeLink?
//sys CimSealImage(blockCimPath string, hashSize *uint64, fixedHeaderSize *uint64, hash *byte) (hr error) = cimfs.CimSealImage?
//sys CimGetVerificationInformation(blockCimPath string, isSealed *uint32, hashSize *uint64, signatureSize *uint64, fixedHeaderSize *uint64, hash *byte, signature *byte) (hr error) = cimfs.CimGetVerificationInformation?
//sys CimMountVerifiedImage(imagePath string, fsName string, flags uint32, volumeID *g, hashSize uint16, hash *byte) (hr error) = cimfs.CimMountVerifiedImage?


@@ -4,6 +4,8 @@ package winapi
import "github.com/Microsoft/go-winio/pkg/guid"
//sys CMGetDeviceInterfaceListSize(listlen *uint32, classGUID *g, deviceID *uint16, ulFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_Interface_List_SizeW
//sys CMGetDeviceInterfaceList(classGUID *g, deviceID *uint16, buffer *uint16, bufLen uint32, ulFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_Interface_ListW
//sys CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) = cfgmgr32.CM_Get_Device_ID_List_SizeA
//sys CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error)= cfgmgr32.CM_Get_Device_ID_ListA
//sys CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) = cfgmgr32.CM_Locate_DevNodeW


@@ -52,7 +52,11 @@ var (
procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_Interface_ListW = modcfgmgr32.NewProc("CM_Get_Device_Interface_ListW")
procCM_Get_Device_Interface_List_SizeW = modcfgmgr32.NewProc("CM_Get_Device_Interface_List_SizeW")
procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW")
procCimAddFsToMergedImage = modcimfs.NewProc("CimAddFsToMergedImage")
procCimAddFsToMergedImage2 = modcimfs.NewProc("CimAddFsToMergedImage2")
procCimCloseImage = modcimfs.NewProc("CimCloseImage")
procCimCloseStream = modcimfs.NewProc("CimCloseStream")
procCimCommitImage = modcimfs.NewProc("CimCommitImage")
@@ -60,9 +64,16 @@ var (
procCimCreateFile = modcimfs.NewProc("CimCreateFile")
procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink")
procCimCreateImage = modcimfs.NewProc("CimCreateImage")
procCimCreateImage2 = modcimfs.NewProc("CimCreateImage2")
procCimCreateMergeLink = modcimfs.NewProc("CimCreateMergeLink")
procCimDeletePath = modcimfs.NewProc("CimDeletePath")
procCimDismountImage = modcimfs.NewProc("CimDismountImage")
procCimGetVerificationInformation = modcimfs.NewProc("CimGetVerificationInformation")
procCimMergeMountImage = modcimfs.NewProc("CimMergeMountImage")
procCimMountImage = modcimfs.NewProc("CimMountImage")
procCimMountVerifiedImage = modcimfs.NewProc("CimMountVerifiedImage")
procCimSealImage = modcimfs.NewProc("CimSealImage")
procCimTombstoneFile = modcimfs.NewProc("CimTombstoneFile")
procCimWriteStream = modcimfs.NewProc("CimWriteStream")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
@@ -161,6 +172,28 @@ func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr e
return
}
func CMGetDeviceInterfaceList(classGUID *g, deviceID *uint16, buffer *uint16, bufLen uint32, ulFlags uint32) (hr error) {
r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufLen), uintptr(ulFlags))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CMGetDeviceInterfaceListSize(listlen *uint32, classGUID *g, deviceID *uint16, ulFlags uint32) (hr error) {
r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(listlen)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(deviceID)), uintptr(ulFlags))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(pDeviceID)
@@ -181,6 +214,54 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr
return
}
func CimAddFsToMergedImage(cimFSHandle FsHandle, path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimAddFsToMergedImage(cimFSHandle, _p0)
}
func _CimAddFsToMergedImage(cimFSHandle FsHandle, path *uint16) (hr error) {
hr = procCimAddFsToMergedImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimAddFsToMergedImage2(cimFSHandle FsHandle, path string, flags uint32) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimAddFsToMergedImage2(cimFSHandle, _p0, flags)
}
func _CimAddFsToMergedImage2(cimFSHandle FsHandle, path *uint16, flags uint32) (hr error) {
hr = procCimAddFsToMergedImage2.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimAddFsToMergedImage2.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(flags))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCloseImage(cimFSHandle FsHandle) (err error) {
err = procCimCloseImage.Find()
if err != nil {
@@ -321,6 +402,59 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci
return
}
func CimCreateImage2(imagePath string, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(imagePath)
if hr != nil {
return
}
return _CimCreateImage2(_p0, flags, oldFSName, newFSName, cimFSHandle)
}
func _CimCreateImage2(imagePath *uint16, flags uint32, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) {
hr = procCimCreateImage2.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimCreateImage2.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(flags), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimCreateMergeLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(newPath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(oldPath)
if hr != nil {
return
}
return _CimCreateMergeLink(cimFSHandle, _p0, _p1)
}
func _CimCreateMergeLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) {
hr = procCimCreateMergeLink.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimCreateMergeLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
@@ -360,6 +494,45 @@ func CimDismountImage(volumeID *g) (hr error) {
return
}
func CimGetVerificationInformation(blockCimPath string, isSealed *uint32, hashSize *uint64, signatureSize *uint64, fixedHeaderSize *uint64, hash *byte, signature *byte) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(blockCimPath)
if hr != nil {
return
}
return _CimGetVerificationInformation(_p0, isSealed, hashSize, signatureSize, fixedHeaderSize, hash, signature)
}
func _CimGetVerificationInformation(blockCimPath *uint16, isSealed *uint32, hashSize *uint64, signatureSize *uint64, fixedHeaderSize *uint64, hash *byte, signature *byte) (hr error) {
hr = procCimGetVerificationInformation.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimGetVerificationInformation.Addr(), uintptr(unsafe.Pointer(blockCimPath)), uintptr(unsafe.Pointer(isSealed)), uintptr(unsafe.Pointer(hashSize)), uintptr(unsafe.Pointer(signatureSize)), uintptr(unsafe.Pointer(fixedHeaderSize)), uintptr(unsafe.Pointer(hash)), uintptr(unsafe.Pointer(signature)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimMergeMountImage(numCimPaths uint32, backingImagePaths *CimFsImagePath, flags uint32, volumeID *g) (hr error) {
hr = procCimMergeMountImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimMergeMountImage.Addr(), uintptr(numCimPaths), uintptr(unsafe.Pointer(backingImagePaths)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(imagePath)
@@ -389,6 +562,83 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g
return
}
func CimMountVerifiedImage(imagePath string, fsName string, flags uint32, volumeID *g, hashSize uint16, hash *byte) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(imagePath)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(fsName)
if hr != nil {
return
}
return _CimMountVerifiedImage(_p0, _p1, flags, volumeID, hashSize, hash)
}
func _CimMountVerifiedImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g, hashSize uint16, hash *byte) (hr error) {
hr = procCimMountVerifiedImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimMountVerifiedImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), uintptr(hashSize), uintptr(unsafe.Pointer(hash)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimSealImage(blockCimPath string, hashSize *uint64, fixedHeaderSize *uint64, hash *byte) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(blockCimPath)
if hr != nil {
return
}
return _CimSealImage(_p0, hashSize, fixedHeaderSize, hash)
}
func _CimSealImage(blockCimPath *uint16, hashSize *uint64, fixedHeaderSize *uint64, hash *byte) (hr error) {
hr = procCimSealImage.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimSealImage.Addr(), uintptr(unsafe.Pointer(blockCimPath)), uintptr(unsafe.Pointer(hashSize)), uintptr(unsafe.Pointer(fixedHeaderSize)), uintptr(unsafe.Pointer(hash)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimTombstoneFile(cimFSHandle FsHandle, path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _CimTombstoneFile(cimFSHandle, _p0)
}
func _CimTombstoneFile(cimFSHandle FsHandle, path *uint16) (hr error) {
hr = procCimTombstoneFile.Find()
if hr != nil {
return
}
r0, _, _ := syscall.SyscallN(procCimTombstoneFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) {
hr = procCimWriteStream.Find()
if hr != nil {


@@ -3,7 +3,8 @@ package osversion
// List of stable ABI compliant ltsc releases
// Note: List must be sorted in ascending order
var compatLTSCReleases = []uint16{
V21H2Server,
LTSC2022,
LTSC2025,
}
// CheckHostAndContainerCompat checks if given host and container
@@ -20,16 +21,25 @@ func CheckHostAndContainerCompat(host, ctr OSVersion) bool {
}
// If host is < WS 2022, exact version match is required
if host.Build < V21H2Server {
if host.Build < LTSC2022 {
return host.Build == ctr.Build
}
var supportedLtscRelease uint16
// Find the latest LTSC version that is earlier than the host version.
// This is the earliest version of container that the host can run.
//
// If the host version is an LTSC, then it supports compatibility with
// everything from the previous LTSC up to itself, so we want supportedLTSCRelease
// to be the previous entry.
//
// If no match is found, then we know that the host is LTSC2022 exactly,
// since we already checked that it's not less than LTSC2022.
var supportedLTSCRelease uint16 = LTSC2022
for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
if host.Build >= compatLTSCReleases[i] {
supportedLtscRelease = compatLTSCReleases[i]
if host.Build > compatLTSCReleases[i] {
supportedLTSCRelease = compatLTSCReleases[i]
break
}
}
return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
return supportedLTSCRelease <= ctr.Build && ctr.Build <= host.Build
}
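A usage sketch of the updated check (LTSC2022 is build 20348, aliased from V21H2Server elsewhere in this package; 17763 is the RS5 build):

host := osversion.OSVersion{Build: osversion.LTSC2025} // Windows Server 2025 host
// the previous LTSC is the oldest container image the host can run
fmt.Println(osversion.CheckHostAndContainerCompat(host, osversion.OSVersion{Build: osversion.LTSC2022})) // true
fmt.Println(osversion.CheckHostAndContainerCompat(host, osversion.OSVersion{Build: 17763}))              // false: older than LTSC2022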


@@ -81,4 +81,11 @@ const (
// V22H2Win11 corresponds to Windows 11 (2022 Update).
V22H2Win11 = 22621
// V23H2 is the 23H2 release in the Windows Server annual channel.
V23H2 = 25398
// Windows Server 2025 build 26100
V25H1Server = 26100
LTSC2025 = V25H1Server
)


@@ -0,0 +1,96 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errhttp provides utility functions for translating errors to
// and from a HTTP context.
//
// The functions ToHTTP and ToNative can be used to map server-side and
// client-side errors to the correct types.
package errhttp
import (
"errors"
"net/http"
"github.com/containerd/errdefs"
"github.com/containerd/errdefs/pkg/internal/cause"
)
// ToHTTP returns the best status code for the given error
func ToHTTP(err error) int {
switch {
case errdefs.IsNotFound(err):
return http.StatusNotFound
case errdefs.IsInvalidArgument(err):
return http.StatusBadRequest
case errdefs.IsConflict(err):
return http.StatusConflict
case errdefs.IsNotModified(err):
return http.StatusNotModified
case errdefs.IsFailedPrecondition(err):
return http.StatusPreconditionFailed
case errdefs.IsUnauthorized(err):
return http.StatusUnauthorized
case errdefs.IsPermissionDenied(err):
return http.StatusForbidden
case errdefs.IsResourceExhausted(err):
return http.StatusTooManyRequests
case errdefs.IsInternal(err):
return http.StatusInternalServerError
case errdefs.IsNotImplemented(err):
return http.StatusNotImplemented
case errdefs.IsUnavailable(err):
return http.StatusServiceUnavailable
case errdefs.IsUnknown(err):
var unexpected cause.ErrUnexpectedStatus
if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 {
return unexpected.Status
}
return http.StatusInternalServerError
default:
return http.StatusInternalServerError
}
}
// ToNative returns the error best matching the HTTP status code
func ToNative(statusCode int) error {
switch statusCode {
case http.StatusNotFound:
return errdefs.ErrNotFound
case http.StatusBadRequest:
return errdefs.ErrInvalidArgument
case http.StatusConflict:
return errdefs.ErrConflict
case http.StatusPreconditionFailed:
return errdefs.ErrFailedPrecondition
case http.StatusUnauthorized:
return errdefs.ErrUnauthenticated
case http.StatusForbidden:
return errdefs.ErrPermissionDenied
case http.StatusNotModified:
return errdefs.ErrNotModified
case http.StatusTooManyRequests:
return errdefs.ErrResourceExhausted
case http.StatusInternalServerError:
return errdefs.ErrInternal
case http.StatusNotImplemented:
return errdefs.ErrNotImplemented
case http.StatusServiceUnavailable:
return errdefs.ErrUnavailable
default:
return cause.ErrUnexpectedStatus{Status: statusCode}
}
}
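A sketch of the intended round trip between these two functions (the handler and helper names are illustrative, not part of the package):

// server side: map a typed containerd error onto a status code
func writeError(w http.ResponseWriter, err error) {
	http.Error(w, err.Error(), errhttp.ToHTTP(err)) // errdefs.ErrNotFound -> 404
}

// client side: recover a typed error from a response status
func checkStatus(resp *http.Response) error {
	if resp.StatusCode >= 400 {
		return errhttp.ToNative(resp.StatusCode) // 404 -> errdefs.ErrNotFound
	}
	return nil
}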


@@ -35,6 +35,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
@@ -42,6 +43,8 @@ import (
"golang.org/x/sync/errgroup"
)
type GzipHelperFunc func(io.Reader) (io.ReadCloser, error)
type options struct {
chunkSize int
compressionLevel int
@@ -50,6 +53,7 @@ type options struct {
compression Compression
ctx context.Context
minChunkSize int
gzipHelperFunc GzipHelperFunc
}
type Option func(o *options) error
@@ -127,11 +131,25 @@ func WithMinChunkSize(minChunkSize int) Option {
}
}
// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers.
// When a gzip-compressed layer is detected, this function will be used instead of the
// Go standard library gzip decompression for better performance.
// The function should take an io.Reader as input and return an io.ReadCloser.
// If nil, the Go standard library gzip.NewReader will be used.
func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option {
return func(o *options) error {
o.gzipHelperFunc = gzipHelperFunc
return nil
}
}
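A usage sketch for the new option; github.com/klauspost/pgzip stands in here as an assumed faster decompressor, not a dependency of this package:

blob, err := estargz.Build(sectionReader, // *io.SectionReader over the layer tarball
	estargz.WithGzipHelperFunc(func(r io.Reader) (io.ReadCloser, error) {
		return pgzip.NewReader(r) // *pgzip.Reader satisfies io.ReadCloser
	}))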
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
diffID digest.Digester
tocDigest digest.Digest
diffID digest.Digester
tocDigest digest.Digest
readCompleted *atomic.Bool
uncompressedSize *atomic.Int64
}
// DiffID returns the digest of uncompressed blob.
@@ -145,6 +163,19 @@ func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
// UncompressedSize returns the size of uncompressed blob.
// UncompressedSize should only be called after the blob has been fully read.
func (b *Blob) UncompressedSize() (int64, error) {
switch {
case b.uncompressedSize == nil || b.readCompleted == nil:
return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized")
case !b.readCompleted.Load():
return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read")
default:
return b.uncompressedSize.Load(), nil
}
}
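As the comment says, the blob must be fully consumed before the size is valid; a minimal sketch:

if _, err := io.Copy(io.Discard, blob); err != nil { // drain the blob (this also feeds the diffID digester)
	return err
}
size, err := blob.UncompressedSize() // safe now: readCompleted has been stored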
// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
// or plain tar) passed through the argument. If there are some prioritized files are listed in
// the option, these files are grouped as "prioritized" and can be used for runtime optimization
@@ -186,7 +217,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
}
}()
tarBlob, err := decompressBlob(tarBlob, layerFiles)
tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc)
if err != nil {
return nil, err
}
@@ -252,17 +283,28 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
readCompleted := new(atomic.Bool)
uncompressedSize := new(atomic.Int64)
go func() {
r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
var size int64
var decompressFunc func(io.Reader) (io.ReadCloser, error)
if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil {
decompressFunc = opts.gzipHelperFunc
} else {
decompressFunc = opts.compression.Reader
}
decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
defer r.Close()
if _, err := io.Copy(diffID.Hash(), r); err != nil {
defer decompressR.Close()
if size, err = io.Copy(diffID.Hash(), decompressR); err != nil {
pw.CloseWithError(err)
return
}
uncompressedSize.Store(size)
readCompleted.Store(true)
pw.Close()
}()
return &Blob{
@@ -270,8 +312,10 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
tocDigest: tocDgst,
diffID: diffID,
tocDigest: tocDgst,
diffID: diffID,
readCompleted: readCompleted,
uncompressedSize: uncompressedSize,
}, nil
}
@@ -366,8 +410,9 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
// Sort the tar file respecting to the prioritized files list.
sorted := &tarFile{}
picked := make(map[string]struct{})
for _, l := range prioritized {
if err := moveRec(l, intar, sorted); err != nil {
if err := moveRec(l, intar, sorted, picked); err != nil {
if errors.Is(err, errNotFound) && missedPrioritized != nil {
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
@@ -395,8 +440,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
})
}
// Dump all entry and concatinate them.
return append(sorted.dump(), intar.dump()...), nil
// Dump prioritized entries followed by the rest entries while skipping picked ones.
return append(sorted.dump(nil), intar.dump(picked)...), nil
}
// readerFromEntries returns a reader of tar archive that contains entries passed
@@ -408,11 +453,11 @@ func readerFromEntries(entries ...*entry) io.Reader {
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err))
return
}
}
@@ -458,36 +503,42 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
return tf, nil
}
func moveRec(name string, in *tarFile, out *tarFile) error {
func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error {
name = cleanEntryName(name)
if name == "" { // root directory. stop recursion.
if e, ok := in.get(name); ok {
// entry of the root directory exists. we should move it as well.
// this case will occur if tar entries are prefixed with "./", "/", etc.
out.add(e)
in.remove(name)
if _, done := picked[name]; !done {
out.add(e)
picked[name] = struct{}{}
}
}
return nil
}
_, okIn := in.get(name)
_, okOut := out.get(name)
if !okIn && !okOut {
_, okPicked := picked[name]
if !okIn && !okOut && !okPicked {
return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
if err := moveRec(parent, in, out); err != nil {
if err := moveRec(parent, in, out, picked); err != nil {
return err
}
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
if err := moveRec(e.header.Linkname, in, out); err != nil {
if err := moveRec(e.header.Linkname, in, out, picked); err != nil {
return err
}
}
if _, done := picked[name]; done {
return nil
}
if e, ok := in.get(name); ok {
out.add(e)
in.remove(name)
picked[name] = struct{}{}
}
return nil
}
@@ -533,8 +584,18 @@ func (f *tarFile) get(name string) (e *entry, ok bool) {
return
}
func (f *tarFile) dump() []*entry {
return f.stream
func (f *tarFile) dump(skip map[string]struct{}) []*entry {
if len(skip) == 0 {
return f.stream
}
var out []*entry
for _, e := range f.stream {
if _, ok := skip[cleanEntryName(e.header.Name)]; ok {
continue
}
out = append(out, e)
}
return out
}
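The picked map threaded through moveRec and dump is a two-pass dedup; a generic, self-contained sketch of the same pattern:
// First pass: emit prioritized names once, recording them in a set.
picked := make(map[string]struct{})
var prioritizedOut, rest []string
for _, name := range prioritized {
	if _, done := picked[name]; done {
		continue
	}
	prioritizedOut = append(prioritizedOut, name)
	picked[name] = struct{}{}
}
// Second pass: emit everything not already picked, preserving order.
for _, name := range all {
	if _, done := picked[name]; done {
		continue
	}
	rest = append(rest, name)
}
result := append(prioritizedOut, rest...)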
type readCloser struct {
@@ -627,12 +688,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
switch whence {
default:
return 0, fmt.Errorf("Unknown whence: %v", whence)
return 0, fmt.Errorf("unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
return 0, fmt.Errorf("Unsupported whence: %v", whence)
return 0, fmt.Errorf("unsupported whence: %v", whence)
}
if offset < 0 {
@@ -649,7 +710,7 @@ func (cr *countReadSeeker) currentPos() int64 {
return *cr.cPos
}
func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) {
if org.Size() < 4 {
return org, nil
}
@@ -660,7 +721,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
var dR io.Reader
if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
// gzip
dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
var dgR io.ReadCloser
var err error
if gzipHelperFunc != nil {
dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size()))
} else {
dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
}
if err != nil {
return nil, err
}
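The GzipHelperFunc hook lets a caller substitute an alternative gzip implementation for the stdlib reader. A hypothetical helper (only the GzipHelperFunc type is visible in this diff; how it is attached to the build options is an assumption), using the pgzip package vendored elsewhere in this commit:
// helper matches GzipHelperFunc's shape: func(io.Reader) (io.ReadCloser, error).
func helper(r io.Reader) (io.ReadCloser, error) {
	zr, err := pgzip.NewReader(r) // parallel gzip instead of compress/gzip
	if err != nil {
		return nil, err
	}
	return zr, nil
}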

View File

@@ -307,6 +307,15 @@ func (r *Reader) initFields() error {
}
}
if len(r.m) == 0 {
r.m[""] = &TOCEntry{
Name: "",
Type: "dir",
Mode: 0755,
NumLink: 1,
}
}
return nil
}
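Hedged illustration of the effect (variable names are illustrative): with this fallback, even a blob whose TOC has no entries exposes a synthesized root, so an empty-name lookup always succeeds.
// r is a *Reader obtained from a successful Open.
if root, ok := r.Lookup(""); ok {
	fmt.Println(root.Type, root.NumLink) // "dir", 1 even for an empty TOC
}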

View File

@@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte {
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
gz.Header.Extra = append(header, []byte(subfield)...)
gz.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
@@ -136,7 +167,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, _ error) {
return 0, 0, 0, err
}
defer zr.Close()
extra := zr.Header.Extra
extra := zr.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
@@ -181,7 +212,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, _ error) {
return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
extra := zr.Header.Extra
extra := zr.Extra
if len(extra) != 16+len("STARGZ") {
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
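For reference, a small worked sketch of the subfield round trip these functions rely on (stdlib only; the offset value is illustrative):
tocOff := int64(0x3039)
subfield := fmt.Sprintf("%016xSTARGZ", tocOff) // "0000000000003039STARGZ"
parsed, err := strconv.ParseInt(subfield[:16], 16, 64)
if err != nil {
	panic(err)
}
// parsed == tocOff; the 'S','G' subfield IDs and the little-endian length
// precede these bytes inside the gzip Extra header, per RFC 1952.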

View File

@@ -38,7 +38,6 @@ import (
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
@@ -49,16 +48,48 @@ import (
// TestingController is Compression with some helper methods necessary for testing.
type TestingController interface {
Compression
TestStreams(t *testing.T, b []byte, streams []int64)
DiffIDOf(*testing.T, []byte) string
TestStreams(t TestingT, b []byte, streams []int64)
DiffIDOf(TestingT, []byte) string
String() string
}
// TestingT is the minimal set of testing.T required to run the
// tests defined in CompressionTestSuite. This interface exists to avoid
// exposing the testing package outside of tests.
type TestingT interface {
Errorf(format string, args ...any)
FailNow()
Failed() bool
Fatal(args ...any)
Fatalf(format string, args ...any)
Logf(format string, args ...any)
Parallel()
}
// Runner allows running subtests of TestingT. This exists instead of adding
// a Run method to the TestingT interface because the Run implementation of
// testing.T would not satisfy the interface.
type Runner func(t TestingT, name string, fn func(t TestingT))
type TestRunner struct {
TestingT
Runner Runner
}
func (r *TestRunner) Run(name string, run func(*TestRunner)) {
r.Runner(r.TestingT, name, func(t TestingT) {
run(&TestRunner{TestingT: t, Runner: r.Runner})
})
}
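From a regular Go test, *testing.T already satisfies TestingT, so callers only need the Run bridge described above; a hedged wiring sketch (the import path follows the vendored module and is an assumption):
package estargz_test

import (
	"testing"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// run adapts testing.T.Run's func(*testing.T) callback to func(TestingT).
func run(t estargz.TestingT, name string, fn func(estargz.TestingT)) {
	t.(*testing.T).Run(name, func(t *testing.T) { fn(t) })
}

func TestCompression(t *testing.T) {
	tr := &estargz.TestRunner{TestingT: t, Runner: run}
	estargz.CompressionTestSuite(tr /* , controller factories go here */)
}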
// CompressionTestSuite tests that this pkg, using the given controllers, can build valid eStargz blobs and parse them.
func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) {
t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) })
t.Run("testDigestAndVerify", func(t *TestRunner) {
t.Parallel()
testDigestAndVerify(t, controllers...)
})
t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) })
}
type TestingControllerFactory func() TestingController
@@ -79,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
// testBuild tests the resulting stargz blob built by this pkg has the same
// contents as the normal stargz blob.
func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
func testBuild(t *TestRunner, controllers ...TestingControllerFactory) {
tests := []struct {
name string
chunkSize int
@@ -165,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
prefix := prefix
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) {
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
// Test divideEntries()
entries, err := sortEntries(tarBlob, nil, nil) // identical order
@@ -265,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
}
}
func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aGz, err := cla.Reader(bytes.NewReader(a))
if err != nil {
t.Fatalf("failed to read A")
@@ -325,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
return true
}
func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
if err != nil {
t.Fatalf("failed to parse A: %v", err)
@@ -339,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
return aJTOC.Version == bJTOC.Version
}
func isSameEntries(t *testing.T, a, b *Reader) bool {
func isSameEntries(t TestingT, a, b *Reader) bool {
aroot, ok := a.Lookup("")
if !ok {
t.Fatalf("failed to get root of A")
@@ -353,18 +384,19 @@ func isSameEntries(t *testing.T, a, b *Reader) bool {
return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
}
func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader {
buf := new(bytes.Buffer)
var w io.WriteCloser
var err error
if srcCompression == gzipType {
switch srcCompression {
case gzipType:
w = gzip.NewWriter(buf)
} else if srcCompression == zstdType {
case zstdType:
w, err = zstd.NewWriter(buf)
if err != nil {
t.Fatalf("failed to init zstd writer: %v", err)
}
} else {
default:
return src
}
src.Seek(0, io.SeekStart)
@@ -386,7 +418,7 @@ type stargzEntry struct {
// contains checks if all child entries in "b" are also contained in "a".
// This function also checks if the files/chunks contain the same contents among "a" and "b".
func contains(t *testing.T, a, b stargzEntry) bool {
func contains(t TestingT, a, b stargzEntry) bool {
ae, ar := a.e, a.r
be, br := b.e, b.r
t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
@@ -445,7 +477,7 @@ func contains(t *testing.T, a, b stargzEntry) bool {
bbytes, bnext, bok := readOffset(t, bf, nr, b)
if !aok && !bok {
break
} else if !(aok && bok) || anext != bnext {
} else if !aok || !bok || anext != bnext {
t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
ae.Name, be.Name, nr, aok, bok, anext, bnext)
return false
@@ -497,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool {
a.Digest == b.Digest
}
func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
if !ok {
return nil, 0, false
@@ -516,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry)
return data[:n], offset + ce.ChunkSize, true
}
func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
func dumpTOCJSON(t TestingT, tocJSON *JTOC) string {
jtocData, err := json.Marshal(*tocJSON)
if err != nil {
t.Fatalf("failed to marshal TOC JSON: %v", err)
@@ -530,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
const chunkSize = 3
// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
// testDigestAndVerify runs specified checks against sample stargz blobs.
func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) {
tests := []struct {
name string
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry)
checks []check
minChunkSize []int
}{
{
name: "no-regfile",
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
dir("test/"),
)
@@ -558,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
},
{
name: "small-files",
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -582,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
},
{
name: "big-files",
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -606,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
{
name: "with-non-regfiles",
minChunkSize: []int{0, 64000},
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -653,7 +684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
srcTarFormat := srcTarFormat
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) {
// Get original tar file and chunk digests
dgstMap := make(map[string]digest.Digest)
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
@@ -689,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
// checkStargzTOC checks the TOC JSON of the passed stargz has the expected
// digest and contains valid chunks. It walks all entries in the stargz and
// checks all chunk digests stored to the TOC JSON match the actual contents.
func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@@ -800,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// checkVerifyTOC checks the verification works for the TOC JSON of the passed
// stargz. It walks all entries in the stargz and checks the verifications for
// all chunks work.
func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@@ -881,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
// detected during the verification and the verification returns an error.
func checkVerifyInvalidTOCEntryFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
funcs := map[string]rewriteFunc{
"lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
"lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
var found bool
for _, e := range toc.Entries {
if cleanEntryName(e.Name) == filename {
@@ -901,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
t.Fatalf("rewrite target not found")
}
},
"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
"duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
var (
sampleEntry *TOCEntry
targetEntry *TOCEntry
@@ -928,7 +959,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
}
for name, rFunc := range funcs {
t.Run(name, func(t *testing.T) {
t.Run(name, func(t *TestRunner) {
newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
buf := new(bytes.Buffer)
if _, err := io.Copy(buf, newSgz); err != nil {
@@ -957,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
// checkVerifyInvalidStargzFail checks if the verification detects that the
// given stargz file doesn't match the expected digest and returns an error.
func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
cl := newController()
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
if err != nil {
@@ -989,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
// checkVerifyBrokenContentFail checks if the verifier detects broken contents
// that don't match the expected digest and returns an error.
func checkVerifyBrokenContentFail(filename string) check {
return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// Parse stargz file
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@@ -1046,9 +1077,9 @@ func chunkID(name string, offset, size int64) string {
return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
}
type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader)
func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
if err != nil {
t.Fatalf("failed to extract TOC JSON: %v", err)
@@ -1119,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
return decodedJTOC, tocOffset, nil
}
func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) {
const content = "Some contents"
invalidUtf8 := "\xff\xfe\xfd"
@@ -1463,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat
for _, lossless := range []bool{true, false} {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) {
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
origTarDgstr := digest.Canonical.Digester()
tr = io.TeeReader(tr, origTarDgstr.Hash())
@@ -1529,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
if err != nil {
t.Fatalf("stargz.Open: %v", err)
}
if _, ok := r.Lookup(""); !ok {
t.Fatalf("failed to lookup rootdir: %v", err)
}
wantTOCVersion := 1
if tt.wantTOCVersion > 0 {
wantTOCVersion = tt.wantTOCVersion
@@ -1627,7 +1661,7 @@ func digestFor(content string) string {
type numTOCEntries int
func (n numTOCEntries) check(t *testing.T, r *Reader) {
func (n numTOCEntries) check(t TestingT, r *Reader) {
if r.toc == nil {
t.Fatal("nil TOC")
}
@@ -1647,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) {
func checks(s ...stargzCheck) []stargzCheck { return s }
type stargzCheck interface {
check(t *testing.T, r *Reader)
check(t TestingT, r *Reader)
}
type stargzCheckFn func(*testing.T, *Reader)
type stargzCheckFn func(TestingT, *Reader)
func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) }
func maxDepth(max int) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
e, ok := r.Lookup("")
if !ok {
t.Fatal("root directory not found")
@@ -1672,7 +1706,7 @@ func maxDepth(max int) stargzCheck {
})
}
func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) {
if current > limit {
return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
current, limit)
@@ -1672,7 +1706,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
}
func hasFileLen(file string, wantLen int) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "reg" {
@@ -1710,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck {
}
func hasFileXattrs(file, name, value string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "reg" {
@@ -1737,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck {
}
func hasFileDigest(file string, digest string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(file)
if !ok {
t.Fatalf("didn't find TOCEntry for file %q", file)
@@ -1749,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck {
}
func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
extraMap := make(map[string]chunkInfo)
for _, e := range extra {
extraMap[e.name] = e
@@ -1796,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c
}
func hasFileContentsRange(file string, offset int, want string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
f, err := r.OpenFile(file)
if err != nil {
t.Fatal(err)
@@ -1813,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
}
func hasChunkEntries(file string, wantChunks int) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(file)
if !ok {
t.Fatalf("no file for %q", file)
@@ -1857,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck {
}
func entryHasChildren(dir string, want ...string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
want := append([]string(nil), want...)
var got []string
ent, ok := r.Lookup(dir)
@@ -1876,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck {
}
func hasDir(file string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Type != "dir" {
@@ -1890,7 +1924,7 @@ func hasDir(file string) stargzCheck {
}
func hasDirLinkCount(file string, count int) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Type != "dir" {
@@ -1908,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck {
}
func hasMode(file string, mode os.FileMode) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Stat().Mode() != mode {
@@ -1923,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck {
}
func hasSymlink(file, target string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "symlink" {
@@ -1939,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck {
}
func lookupMatch(name string, want *TOCEntry) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
e, ok := r.Lookup(name)
if !ok {
t.Fatalf("failed to Lookup entry %q", name)
@@ -1952,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck {
}
func hasEntryOwner(entry string, owner owner) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
if !ok {
t.Errorf("entry %q not found", entry)
@@ -1966,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
}
func mustSameEntry(files ...string) stargzCheck {
return stargzCheckFn(func(t *testing.T, r *Reader) {
return stargzCheckFn(func(t TestingT, r *Reader) {
var first *TOCEntry
for _, f := range files {
if first == nil {
@@ -2038,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format
return f(tw, prefix, format)
}
func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
format := tar.FormatUnknown
for _, opt := range opts {
switch v := opt.(type) {
@@ -2247,7 +2281,7 @@ func noPrefetchLandmark() tarEntry {
})
}
func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
if digestMap == nil {
t.Fatalf("digest map mustn't be nil")
}
@@ -2317,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
func (f fileInfoOnlyMode) Sys() interface{} { return nil }
func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) {
if len(streams) == 0 {
return // nop
}
@@ -2346,8 +2380,8 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
t.Fatalf("countStreams(gzip), Copy: %v", err)
}
var extra string
if len(zr.Header.Extra) > 0 {
extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
if len(zr.Extra) > 0 {
extra = fmt.Sprintf("; extra=%q", zr.Extra)
}
t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
delete(wants, int64(zoff))
@@ -2355,7 +2389,7 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
}
}
func GzipDiffIDOf(t *testing.T, b []byte) string {
func GzipDiffIDOf(t TestingT, b []byte) string {
h := sha256.New()
zr, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {

View File

@@ -615,11 +615,11 @@ func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
}
switch {
case d.c.supportsSignatures:
if err := d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest); err != nil {
if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil {
return err
}
case d.c.signatureBase != nil:
if err := d.putSignaturesToLookaside(signatures, *instanceDigest); err != nil {
if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil {
return err
}
default:

View File

@@ -118,6 +118,7 @@ type PutBlobOptions struct {
// PutBlobPartialOptions are used in PutBlobPartial.
type PutBlobPartialOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs)
}

View File

@@ -1,547 +0,0 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"unsafe"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/fileutils"
"github.com/klauspost/pgzip"
"github.com/opencontainers/go-digest"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
// #include <selinux/selinux.h>
// #include <selinux/label.h>
import "C"
type blobToImport struct {
Size int64
Digest digest.Digest
BlobPath string
}
type descriptor struct {
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
}
type fsLayersSchema1 struct {
BlobSum digest.Digest `json:"blobSum"`
}
type manifestSchema struct {
LayersDescriptors []descriptor `json:"layers"`
FSLayers []fsLayersSchema1 `json:"fsLayers"`
}
type ostreeImageDestination struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures
ref ostreeReference
manifest string
schema manifestSchema
tmpDirPath string
blobs map[string]*blobToImport
digest digest.Digest
signaturesLen int
repo *C.struct_OstreeRepo
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
func newImageDestination(ref ostreeReference, tmpDirPath string) (private.ImageDestination, error) {
tmpDirPath = filepath.Join(tmpDirPath, ref.branchName)
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
d := &ostreeImageDestination{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
SupportedManifestMIMETypes: []string{manifest.DockerV2Schema2MediaType},
DesiredLayerCompression: types.PreserveOriginal,
AcceptsForeignLayerURLs: false,
MustMatchRuntimeOS: true,
IgnoresEmbeddedDockerReference: false, // N/A, DockerReference() returns nil.
HasThreadSafePutBlob: false,
}),
NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
ref: ref,
manifest: "",
schema: manifestSchema{},
tmpDirPath: tmpDirPath,
blobs: map[string]*blobToImport{},
digest: "",
signaturesLen: 0,
repo: nil,
}
d.Compat = impl.AddCompat(d)
return d, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *ostreeImageDestination) Reference() types.ImageReference {
return d.ref
}
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error {
if d.repo != nil {
C.g_object_unref(C.gpointer(d.repo))
}
return os.RemoveAll(d.tmpDirPath)
}
// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return private.UploadedBlob{}, err
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return private.UploadedBlob{}, err
}
size, err := func() (_ int64, retErr error) { // A scope for defer
// since we are writing to this file, make sure we handle errors
defer func() {
closeErr := blobFile.Close()
if retErr == nil {
retErr = closeErr
}
}()
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
return io.Copy(blobFile, stream)
}()
if err != nil {
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
}
hash := blobDigest.Encoded()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
entries, err := os.ReadDir(dir)
if err != nil {
return err
}
for _, entry := range entries {
fullpath := filepath.Join(dir, entry.Name())
if entry.Type()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if err := os.Remove(fullpath); err != nil {
return err
}
continue
}
info, err := entry.Info()
if err != nil {
return err
}
if selinuxHnd != nil {
relPath, err := filepath.Rel(root, fullpath)
if err != nil {
return err
}
// Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
// thus we benefit from maintaining the same SELinux label they would have on the host as we could
// use hard links instead of copying the files.
relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
relPathC := C.CString(relPath)
defer C.free(unsafe.Pointer(relPathC))
var context *C.char
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
return fmt.Errorf("cannot selabel_lookup_raw %s: %w", relPath, err)
}
if int(res) == 0 {
defer C.freecon(context)
fullpathC := C.CString(fullpath)
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
return fmt.Errorf("cannot setfilecon_raw %s to %s: %w", fullpath, C.GoString(context), err)
}
}
}
if entry.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
}
}
err = fixFiles(selinuxHnd, root, fullpath, usermode)
if err != nil {
return err
}
} else if usermode && (entry.Type().IsRegular()) {
if err := os.Chmod(fullpath, info.Mode()|0600); err != nil {
return err
}
}
}
return nil
}
func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error {
opts := otbuiltin.NewCommitOptions()
opts.AddMetadataString = metadata
opts.Timestamp = time.Now()
// OCI layers have no parent OSTree commit
opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
_, err := repo.Commit(root, branch, opts)
return err
}
func generateTarSplitMetadata(output *bytes.Buffer, file string) (_ digest.Digest, _ int64, retErr error) {
mfz := pgzip.NewWriter(output)
// since we are writing to this, make sure we handle errors
defer func() {
closeErr := mfz.Close()
if retErr == nil {
retErr = closeErr
}
}()
metaPacker := storage.NewJSONPacker(mfz)
stream, err := os.OpenFile(file, os.O_RDONLY, 0)
if err != nil {
return "", -1, err
}
defer stream.Close()
gzReader, err := archive.DecompressStream(stream)
if err != nil {
return "", -1, err
}
defer gzReader.Close()
its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
if err != nil {
return "", -1, err
}
digester := digest.Canonical.Digester()
written, err := io.Copy(digester.Hash(), its)
if err != nil {
return "", -1, err
}
return digester.Digest(), written, nil
}
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
return err
}
defer func() {
os.Remove(blob.BlobPath)
os.RemoveAll(destinationPath)
}()
var tarSplitOutput bytes.Buffer
uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
if err != nil {
return err
}
if os.Getuid() == 0 {
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
}
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
return err
}
} else {
os.MkdirAll(destinationPath, 0755)
if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil {
return err
}
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
return err
}
}
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
}
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
return false, private.ReusedBlob{}, nil
}
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
return false, private.ReusedBlob{}, err
}
d.repo = repo
}
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return false, private.ReusedBlob{}, err
}
branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, private.ReusedBlob{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.
// The instanceDigest value is expected to always be nil, because this transport does not support manifest lists, so
// there can be no secondary manifests.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
func (d *ostreeImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
d.manifest = string(manifestBlob)
if err := json.Unmarshal(manifestBlob, &d.schema); err != nil {
return err
}
manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath())
if err := ensureParentDirectoryExists(manifestPath); err != nil {
return err
}
digest, err := manifest.Digest(manifestBlob)
if err != nil {
return err
}
d.digest = digest
return os.WriteFile(manifestPath, manifestBlob, 0644)
}
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
// MUST be called after PutManifest (signatures may reference manifest contents).
func (d *ostreeImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
if instanceDigest != nil {
return errors.New(`Manifest lists are not supported by "ostree:"`)
}
path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0))
if err := ensureParentDirectoryExists(path); err != nil {
return err
}
for i, sig := range signatures {
signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i))
blob, err := signature.Blob(sig)
if err != nil {
return err
}
if err := os.WriteFile(signaturePath, blob, 0644); err != nil {
return err
}
}
d.signaturesLen = len(signatures)
return nil
}
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed)
func (d *ostreeImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
repo, err := otbuiltin.OpenRepo(d.ref.repo)
if err != nil {
return err
}
_, err = repo.PrepareTransaction()
if err != nil {
return err
}
var selinuxHnd *C.struct_selabel_handle
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
return fmt.Errorf("cannot open the SELinux DB: %w", err)
}
defer C.selabel_close(selinuxHnd)
}
checkLayer := func(hash string) error {
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
// and we don't need to import it.
if blob == nil {
return nil
}
err := d.importBlob(selinuxHnd, repo, blob)
if err != nil {
return err
}
delete(d.blobs, hash)
return nil
}
for _, layer := range d.schema.LayersDescriptors {
if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.Digest.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
}
for _, layer := range d.schema.FSLayers {
if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return err
}
hash := layer.BlobSum.Encoded()
if err = checkLayer(hash); err != nil {
return err
}
}
// Import the other blobs that are not layers
for _, blob := range d.blobs {
err := d.importConfig(repo, blob)
if err != nil {
return err
}
}
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
fmt.Sprintf("signatures=%d", d.signaturesLen),
fmt.Sprintf("docker.digest=%s", string(d.digest))}
if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
return err
}
_, err = repo.CommitTransaction()
return err
}
func ensureDirectoryExists(path string) error {
if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
}
return nil
}
func ensureParentDirectoryExists(path string) error {
return ensureDirectoryExists(filepath.Dir(path))
}

View File

@@ -1,453 +0,0 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"strconv"
"strings"
"unsafe"
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
import "C"
type ostreeImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
// get the compressed layer by its uncompressed checksum
compressed map[digest.Digest]digest.Digest
}
// newImageSource returns an ImageSource for reading from an existing ostree repository.
func newImageSource(tmpDir string, ref ostreeReference) (private.ImageSource, error) {
s := &ostreeImageSource{
PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
HasThreadSafeGetBlob: false,
}),
NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
ref: ref,
tmpDir: tmpDir,
compressed: nil,
}
s.Compat = impl.AddCompat(s)
return s, nil
}
// Reference returns the reference used to set up this source.
func (s *ostreeImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ostreeImageSource) Close() error {
if s.repo != nil {
C.g_object_unref(C.gpointer(s.repo))
}
return nil
}
func (s *ostreeImageSource) getBlobUncompressedSize(blob string, isCompressed bool) (int64, error) {
var metadataKey string
if isCompressed {
metadataKey = "docker.uncompressed_size"
} else {
metadataKey = "docker.size"
}
b := fmt.Sprintf("ociimage/%s", blob)
found, data, err := readMetadata(s.repo, b, metadataKey)
if err != nil || !found {
return 0, err
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getLenSignatures() (int64, error) {
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, data, err := readMetadata(s.repo, b, "signatures")
if err != nil {
return -1, err
}
if !found {
// if 'signatures' is not present, just return 0 signatures.
return 0, nil
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
b := fmt.Sprintf("ociimage/%s", blob)
found, out, err := readMetadata(s.repo, b, "tarsplit.output")
if err != nil || !found {
return nil, err
}
return base64.StdEncoding.DecodeString(out)
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be no non-default instances.
func (s *ostreeImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
return nil, "", errors.New(`Manifest lists are not supported by "ostree:"`)
}
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, "", err
}
s.repo = repo
}
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, out, err := readMetadata(s.repo, b, "docker.manifest")
if err != nil {
return nil, "", err
}
if !found {
return nil, "", errors.New("manifest not found")
}
m := []byte(out)
return m, manifest.GuessMIMEType(m), nil
}
func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
return nil, "", errors.New("manifest lists are not supported by this transport")
}
func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
file := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(file))
repo := C.ostree_repo_new(file)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return repo, nil
}
type ostreePathFileGetter struct {
repo *C.struct_OstreeRepo
parentRoot *C.GFile
}
type ostreeReader struct {
stream *C.GFileInputStream
}
func (o ostreeReader) Close() error {
C.g_object_unref(C.gpointer(o.stream))
return nil
}
func (o ostreeReader) Read(p []byte) (int, error) {
var cerr *C.GError
instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
if b == nil {
return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_bytes_unref(b)
count := int(C.g_bytes_get_size(b))
if count == 0 {
return 0, io.EOF
}
data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count)
copy(p, data)
return count, nil
}
func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
var cerr *C.GError
var ref *C.char
defer C.free(unsafe.Pointer(ref))
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
if ref == nil {
return false, "", nil
}
var variant *C.GVariant
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_variant_unref(variant)
if variant != nil {
cKey := C.CString(key)
defer C.free(unsafe.Pointer(cKey))
metadata := C.g_variant_get_child_value(variant, 0)
defer C.g_variant_unref(metadata)
data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
if data != nil {
defer C.g_variant_unref(data)
ptr := (*C.char)(C.g_variant_get_string(data, nil))
val := C.GoString(ptr)
return true, val, nil
}
}
return false, "", nil
}
func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
var cerr *C.GError
var parentRoot *C.GFile
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
C.g_object_ref(C.gpointer(repo))
return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
}
func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
var file *C.GFile
filename, _ = strings.CutPrefix(filename, "./")
cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(cfilename))
file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
var cerr *C.GError
stream := C.g_file_read(file, nil, &cerr)
if stream == nil {
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return &ostreeReader{stream: stream}, nil
}
func (o ostreePathFileGetter) Close() {
C.g_object_unref(C.gpointer(o.repo))
C.g_object_unref(C.gpointer(o.parentRoot))
}
func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
getter, err := newOSTreePathFileGetter(s.repo, commit)
if err != nil {
return nil, err
}
defer getter.Close()
return getter.Get(path)
}
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
return nil, -1, err
}
blob := info.Digest.Encoded()
// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
if s.compressed == nil {
_, err := s.LayerInfosForCopy(ctx, nil)
if err != nil {
return nil, -1, err
}
}
compressedBlob, isCompressed := s.compressed[info.Digest]
if isCompressed {
blob = compressedBlob.Encoded()
}
branch := fmt.Sprintf("ociimage/%s", blob)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, 0, err
}
s.repo = repo
}
layerSize, err := s.getBlobUncompressedSize(blob, isCompressed)
if err != nil {
return nil, 0, err
}
tarsplit, err := s.getTarSplitData(blob)
if err != nil {
return nil, 0, err
}
// if tarsplit is nil we are looking at the manifest. Return the file in /content directly
if tarsplit == nil {
file, err := s.readSingleFile(branch, "/content")
if err != nil {
return nil, 0, err
}
return file, layerSize, nil
}
mf := bytes.NewReader(tarsplit)
mfz, err := pgzip.NewReader(mf)
if err != nil {
return nil, 0, err
}
metaUnpacker := storage.NewJSONUnpacker(mfz)
getter, err := newOSTreePathFileGetter(s.repo, branch)
if err != nil {
mfz.Close()
return nil, 0, err
}
ots := asm.NewOutputTarStream(getter, metaUnpacker)
rc := ioutils.NewReadCloserWrapper(ots, func() error {
getter.Close()
mfz.Close()
return ots.Close()
})
return rc, layerSize, nil
}
// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
func (s *ostreeImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
lenSignatures, err := s.getLenSignatures()
if err != nil {
return nil, err
}
branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, err
}
s.repo = repo
}
signatures := []signature.Signature{}
for i := int64(1); i <= lenSignatures; i++ {
path := fmt.Sprintf("/signature-%d", i)
sigReader, err := s.readSingleFile(branch, path)
if err != nil {
return nil, err
}
defer sigReader.Close()
sigBlob, err := io.ReadAll(sigReader)
if err != nil {
return nil, err
}
sig, err := signature.FromBlob(sigBlob)
if err != nil {
return nil, fmt.Errorf("parsing signature %q: %w", path, err)
}
signatures = append(signatures, sig)
}
return signatures, nil
}
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
// to read the image's layers.
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
// as the primary manifest can not be a list, so there can be secondary manifests.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
if instanceDigest != nil {
return nil, errors.New(`Manifest lists are not supported by "ostree:"`)
}
updatedBlobInfos := []types.BlobInfo{}
manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
if err != nil {
return nil, err
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
s.compressed = make(map[digest.Digest]digest.Digest)
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
}
found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return nil, err
}
uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
if err != nil {
return nil, err
}
uncompressedDigest, err := digest.Parse(uncompressedDigestStr)
if err != nil {
return nil, err
}
blobInfo := types.BlobInfo{
Digest: uncompressedDigest,
Size: uncompressedSize,
MediaType: layerBlob.MediaType,
}
s.compressed[uncompressedDigest] = layerBlob.Digest
updatedBlobInfos = append(updatedBlobInfos, blobInfo)
}
return updatedBlobInfos, nil
}

View File

@@ -1,242 +0,0 @@
//go:build containers_image_ostree
// +build containers_image_ostree
package ostree
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/image/v5/directory/explicitfilepath"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
)
const defaultOSTreeRepo = "/ostree/repo"
// Transport is an ImageTransport for ostree paths.
var Transport = ostreeTransport{}
type ostreeTransport struct{}
func (t ostreeTransport) Name() string {
return "ostree"
}
func init() {
transports.Register(Transport)
}
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
sep := strings.Index(scope, ":")
if sep < 0 {
return fmt.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
}
repo := scope[:sep]
if !strings.HasPrefix(repo, "/") {
return fmt.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
}
cleaned := filepath.Clean(repo)
if cleaned != repo {
return fmt.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
// FIXME? In the namespaces within a repo,
// we could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}
// ostreeReference is an ImageReference for ostree paths.
type ostreeReference struct {
image string
branchName string
repo string
}
type ostreeImageCloser struct {
types.ImageCloser
size int64
}
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
if !gotRepoPart {
repo = defaultOSTreeRepo
} else {
repo = "/" + repoPart
}
return NewReference(image, repo)
}
// NewReference returns an OSTree reference for a specified repo and image.
func NewReference(image string, repo string) (types.ImageReference, error) {
// image is not _really_ in a containers/image/docker/reference format;
// as far as the libOSTree ociimage/* namespace is concerned, it is more or
// less an arbitrary string with an implied tag.
// Parse the image using reference.ParseNormalizedNamed so that we can
// check whether the images has a tag specified and we can add ":latest" if needed
ostreeImage, err := reference.ParseNormalizedNamed(image)
if err != nil {
return nil, err
}
if reference.IsNameOnly(ostreeImage) {
image = image + ":latest"
}
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
if err != nil {
// With os.IsNotExist(err), the parent directory of repo is also not existent;
// that should ordinarily not happen, but it would be a bit weird to reject
// references which do not specify a repo just because the implicit defaultOSTreeRepo
// does not exist.
if os.IsNotExist(err) && repo == defaultOSTreeRepo {
resolved = repo
} else {
return nil, err
}
}
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, fmt.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
image: image,
branchName: encodeOStreeRef(image),
repo: resolved,
}, nil
}
func (ref ostreeReference) Transport() types.ImageTransport {
return Transport
}
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
}
// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref ostreeReference) DockerReference() reference.Named {
return nil
}
func (ref ostreeReference) PolicyConfigurationIdentity() string {
return fmt.Sprintf("%s:%s", ref.repo, ref.image)
}
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
repo, _, gotTag := strings.Cut(ref.image, ":")
if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := repo
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))
lastSlash := strings.LastIndex(name, "/")
if lastSlash == -1 {
break
}
name = name[:lastSlash]
}
return res
}
func (s *ostreeImageCloser) Size() (int64, error) {
return s.size, nil
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ostreeReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
return image.FromReference(ctx, sys, ref)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref ostreeReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
var tmpDir string
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
return newImageSource(tmpDir, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref ostreeReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
var tmpDir string
if sys == nil || sys.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = sys.OSTreeTmpDirPath
}
return newImageDestination(ref, tmpDir)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref ostreeReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for ostree: images")
}
var ostreeRefRegexp = regexp.Delayed(`^[A-Za-z0-9.-]$`)
func encodeOStreeRef(in string) string {
var buffer bytes.Buffer
for i := range in {
sub := in[i : i+1]
if ostreeRefRegexp.MatchString(sub) {
buffer.WriteString(sub)
} else {
buffer.WriteString(fmt.Sprintf("_%02X", sub[0]))
}
}
return buffer.String()
}
// manifestPath returns a path for the manifest within a ostree using our conventions.
func (ref ostreeReference) manifestPath() string {
return filepath.Join("manifest", "manifest.json")
}
// signaturePath returns a path for a signature within a ostree using our conventions.
func (ref ostreeReference) signaturePath(index int) string {
return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1))
}

View File

@@ -421,10 +421,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
}
}()
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
differ, err := chunked.NewDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
if err != nil {
return private.UploadedBlob{}, err
}
defer differ.Close()
out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
if err != nil {
@@ -496,9 +497,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
succeeded = true
return private.UploadedBlob{
Digest: blobDigest,
Size: srcInfo.Size,
}, nil
Digest: blobDigest,
Size: srcInfo.Size,
}, s.queueOrCommit(options.LayerIndex, addedLayerInfo{
digest: blobDigest,
emptyLayer: options.EmptyLayer,
})
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination

View File

@@ -19,10 +19,13 @@ import (
_ "github.com/containers/image/v5/sif"
_ "github.com/containers/image/v5/tarball"
// The docker-daemon transport is registeredy by docker_daemon*.go
// The ostree transport is registered by ostree*.go
// The storage transport is registered by storage*.go
)
func init() {
transports.Register(transports.NewStubTransport("ostree")) // This transport was completely removed.
}
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!

View File

@@ -1,8 +0,0 @@
//go:build containers_image_ostree && linux
package alltransports
import (
// Register the ostree transport
_ "github.com/containers/image/v5/ostree"
)

View File

@@ -1,9 +0,0 @@
//go:build !containers_image_ostree || !linux
package alltransports
import "github.com/containers/image/v5/transports"
func init() {
transports.Register(transports.NewStubTransport("ostree"))
}

View File

@@ -72,7 +72,7 @@ func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
var deprecatedTransports = set.NewWithValues("atomic")
var deprecatedTransports = set.NewWithValues("atomic", "ostree")
// ListNames returns a list of non deprecated transport names.
// Deprecated transports can be used, but are not presented to users.

View File

@@ -659,6 +659,8 @@ type SystemContext struct {
// If true, the physical pull source of docker transport images logged as info level
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
//
// Deprecated: The OSTree transport has been removed.
OSTreeTmpDirPath string
// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,

View File

@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 35
VersionMinor = 36
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View File

@@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-41"
FEDORA_NAME: "fedora-42"
DEBIAN_NAME: "debian-13"
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20250324t111922z-f41f40d13"
IMAGE_SUFFIX: "c20250422t130822z-f42f41d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -128,6 +128,7 @@ lint_task:
apt-get update
apt-get install -y libbtrfs-dev libsubid-dev
test_script: |
[ -n "${CIRRUS_BASE_SHA}" ] && git fetch origin ${CIRRUS_BASE_SHA} # Make ${CIRRUS_BASE_SHA} resolvable for git-validation
make TAGS=regex_precompile local-validate
make lint
make clean

View File

@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
# N/B: This value is managed by Renovate, manual changes are
# possible, as long as they don't disturb the formatting
# (i.e. DO NOT ADD A 'v' prefix!)
GOLANGCI_LINT_VERSION := 2.0.2
GOLANGCI_LINT_VERSION := 2.2.1
default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build\nbinaries and docs

View File

@@ -1 +1 @@
1.58.0
1.59.1

View File

@@ -207,7 +207,6 @@ type LayerStore interface {
Mounted(id string) (int, error)
ParentOwners(id string) (uids, gids []int, err error)
ApplyDiff(to string, diff io.Reader) (int64, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
DifferTarget(id string) (string, error)
LoadLocked() error
PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error)

View File

@@ -36,6 +36,7 @@ import (
"time"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
@@ -772,8 +773,8 @@ func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
return fmt.Errorf("aufs doesn't support changing ID mappings")
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (a *Driver) SupportsShifting() bool {
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS
func (a *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool {
return false
}
@@ -781,3 +782,14 @@ func (a *Driver) SupportsShifting() bool {
func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}
// DeferredRemove is not implemented.
// It calls Remove directly.
func (a *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
return nil, a.Remove(id)
}
// GetTempDirRootDirs is not implemented.
func (a *Driver) GetTempDirRootDirs() []string {
return []string{}
}

View File

@@ -30,6 +30,7 @@ import (
"unsafe"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
"github.com/containers/storage/pkg/idtools"
@@ -678,3 +679,14 @@ func (d *Driver) AdditionalImageStores() []string {
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}
// DeferredRemove is not implemented.
// It calls Remove directly.
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
return nil, d.Remove(id)
}
// GetTempDirRootDirs is not implemented.
func (d *Driver) GetTempDirRootDirs() []string {
return []string{}
}

View File

@@ -131,7 +131,7 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost
return ChownPathByMaps(layerFs, toContainer, toHost)
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (n *naiveLayerIDMapUpdater) SupportsShifting() bool {
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS
func (n *naiveLayerIDMapUpdater) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool {
return false
}

View File

@@ -9,6 +9,7 @@ import (
"strings"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
@@ -123,7 +124,17 @@ type ProtoDriver interface {
// and parent, with contents identical to the specified template layer.
CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error
// Remove attempts to remove the filesystem layer with this id.
// This is soft-deprecated and should not get any new callers; use DeferredRemove.
Remove(id string) error
// DeferredRemove is used to remove the filesystem layer with this id.
// This removal happen immediately (the layer is no longer usable),
// but physically deleting the files may be deferred.
// Caller MUST call returned Cleanup function EVEN IF the function returns an error.
DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error)
// GetTempDirRootDirs returns the root directories for temporary directories.
// Multiple directories may be returned when drivers support different filesystems
// for layers (e.g., overlay with imageStore vs home directory).
GetTempDirRootDirs() []string
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Optionally it gets the mappings used to create the layer.
@@ -193,8 +204,9 @@ type LayerIDMapUpdater interface {
UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in a
// image and it is not required to Chown the files when running in an user namespace.
SupportsShifting() bool
// image to the provided mapping and it is not required to Chown the files when running in
// an user namespace.
SupportsShifting(uidmap, gidmap []idtools.IDMap) bool
}
// Driver is the interface for layered/snapshot file system drivers.
@@ -216,8 +228,10 @@ type DriverWithDifferOutput struct {
CompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte // nil if not available
TOCDigest digest.Digest
// TarSplit is owned by the [DriverWithDifferOutput], and must be closed by calling one of
// [Store.ApplyStagedLayer]/[Store.CleanupStagedLayer]. It is nil if not available.
TarSplit *os.File
TOCDigest digest.Digest
// RootDirMode is the mode of the root directory of the layer, if specified.
RootDirMode *os.FileMode
// Artifacts is a collection of additional artifacts
@@ -267,6 +281,7 @@ type DifferOptions struct {
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
Close() error
}
// DriverWithDiffer is the interface for direct diff access.

View File

@@ -23,6 +23,8 @@ import (
"github.com/containers/storage/drivers/overlayutils"
"github.com/containers/storage/drivers/quota"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/internal/staging_lockfile"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/directory"
@@ -30,7 +32,6 @@ import (
"github.com/containers/storage/pkg/fsutils"
"github.com/containers/storage/pkg/idmap"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/lockfile"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
@@ -80,10 +81,11 @@ const (
// that mounts do not fail due to length.
const (
linkDir = "l"
stagingDir = "staging"
lowerFile = "lower"
maxDepth = 500
linkDir = "l"
stagingDir = "staging"
tempDirName = "tempdirs"
lowerFile = "lower"
maxDepth = 500
stagingLockFile = "staging.lock"
@@ -133,7 +135,7 @@ type Driver struct {
stagingDirsLocksMutex sync.Mutex
// stagingDirsLocks access is not thread safe, it is required that callers take
// stagingDirsLocksMutex on each access to guard against concurrent map writes.
stagingDirsLocks map[string]*lockfile.LockFile
stagingDirsLocks map[string]*staging_lockfile.StagingLockFile
supportsIDMappedMounts *bool
}
@@ -222,7 +224,7 @@ func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) {
return supportsIDMappedMounts, err
}
func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome string) (bool, error) {
func checkAndRecordOverlaySupport(home, runhome string) (bool, error) {
var supportsDType bool
if os.Geteuid() != 0 {
@@ -242,7 +244,7 @@ func checkAndRecordOverlaySupport(fsMagic graphdriver.FsMagic, home, runhome str
return false, errors.New(overlayCacheText)
}
} else {
supportsDType, err = supportsOverlay(home, fsMagic, 0, 0)
supportsDType, err = supportsOverlay(home, 0, 0)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
@@ -388,7 +390,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
t := true
supportsVolatile = &t
} else {
supportsDType, err = checkAndRecordOverlaySupport(fsMagic, home, runhome)
supportsDType, err = checkAndRecordOverlaySupport(home, runhome)
if err != nil {
return nil, err
}
@@ -442,7 +444,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
usingComposefs: opts.useComposefs,
options: *opts,
stagingDirsLocksMutex: sync.Mutex{},
stagingDirsLocks: make(map[string]*lockfile.LockFile),
stagingDirsLocks: make(map[string]*staging_lockfile.StagingLockFile),
}
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d))
@@ -666,16 +668,11 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return false, err
}
supportsDType, _ := checkAndRecordOverlaySupport(fsMagic, home, runhome)
supportsDType, _ := checkAndRecordOverlaySupport(home, runhome)
return supportsDType, nil
}
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
func supportsOverlay(home string, rootUID, rootGID int) (supportsDType bool, err error) {
selinuxLabelTest := selinux.PrivContainerMountLabel()
logLevel := logrus.ErrorLevel
@@ -828,7 +825,7 @@ func (d *Driver) Status() [][2]string {
{"Supports d_type", strconv.FormatBool(d.supportsDType)},
{"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())},
{"Using metacopy", strconv.FormatBool(d.usingMetacopy)},
{"Supports shifting", strconv.FormatBool(d.SupportsShifting())},
{"Supports shifting", strconv.FormatBool(d.SupportsShifting(nil, nil))},
{"Supports volatile", strconv.FormatBool(supportsVolatile)},
}
}
@@ -874,7 +871,9 @@ func (d *Driver) Cleanup() error {
func (d *Driver) pruneStagingDirectories() bool {
d.stagingDirsLocksMutex.Lock()
for _, lock := range d.stagingDirsLocks {
lock.Unlock()
if err := lock.UnlockAndDelete(); err != nil {
logrus.Warnf("Failed to unlock and delete staging lock file: %v", err)
}
}
clear(d.stagingDirsLocks)
d.stagingDirsLocksMutex.Unlock()
@@ -886,17 +885,15 @@ func (d *Driver) pruneStagingDirectories() bool {
if err == nil {
for _, dir := range dirs {
stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name())
lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
lock, err := staging_lockfile.TryLockPath(filepath.Join(stagingDirToRemove, stagingLockFile))
if err != nil {
anyPresent = true
continue
}
if err := lock.TryLock(); err != nil {
anyPresent = true
continue
}
_ = os.RemoveAll(stagingDirToRemove)
lock.Unlock()
if err := lock.UnlockAndDelete(); err != nil {
logrus.Warnf("Failed to unlock and delete staging lock file: %v", err)
}
}
}
return anyPresent
@@ -1310,17 +1307,22 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
return d.removeCommon(id, system.EnsureRemoveAll)
}
func (d *Driver) removeCommon(id string, cleanup func(string) error) error {
dir := d.dir(id)
lid, err := os.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
linkPath := path.Join(d.home, linkDir, string(lid))
if err := cleanup(linkPath); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
}
}
d.releaseAdditionalLayerByID(id)
if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
if err := cleanup(dir); err != nil && !os.IsNotExist(err) {
return err
}
if d.quotaCtl != nil {
@@ -1332,6 +1334,41 @@ func (d *Driver) Remove(id string) error {
return nil
}
func (d *Driver) GetTempDirRootDirs() []string {
tempDirs := []string{filepath.Join(d.home, tempDirName)}
// Include imageStore temp directory if it's configured
// Writable layers can only be in d.home or d.imageStore, not in additional image stores
if d.imageStore != "" {
tempDirs = append(tempDirs, filepath.Join(d.imageStore, d.name, tempDirName))
}
return tempDirs
}
// Determine the correct temp directory root based on where the layer actually exists.
func (d *Driver) getTempDirRoot(id string) string {
layerDir := d.dir(id)
if d.imageStore != "" {
expectedLayerDir := path.Join(d.imageStore, d.name, id)
if layerDir == expectedLayerDir {
return filepath.Join(d.imageStore, d.name, tempDirName)
}
}
return filepath.Join(d.home, tempDirName)
}
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
tempDirRoot := d.getTempDirRoot(id)
t, err := tempdir.NewTempDir(tempDirRoot)
if err != nil {
return nil, err
}
if err := d.removeCommon(id, t.StageDeletion); err != nil {
return t.Cleanup, fmt.Errorf("failed to add to stage directory: %w", err)
}
return t.Cleanup, nil
}
// recreateSymlinks goes through the driver's home directory and checks if the diff directory
// under each layer has a symlink created for it under the linkDir. If the symlink does not
// exist, it creates them
@@ -1358,8 +1395,8 @@ func (d *Driver) recreateSymlinks() error {
// Check that for each layer, there's a link in "l" with the name in
// the layer's "link" file that points to the layer's "diff" directory.
for _, dir := range dirs {
// Skip over the linkDir and anything that is not a directory
if dir.Name() == linkDir || !dir.IsDir() {
// Skip over the linkDir, stagingDir, tempDirName and anything that is not a directory
if dir.Name() == linkDir || dir.Name() == stagingDir || dir.Name() == tempDirName || !dir.IsDir() {
continue
}
// Read the "link" file under each layer to get the name of the symlink
@@ -1483,7 +1520,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
readWrite := !inAdditionalStore
if !d.SupportsShifting() || options.DisableShifting {
if !d.SupportsShifting(options.UidMaps, options.GidMaps) || options.DisableShifting {
disableShifting = true
}
@@ -2027,7 +2064,7 @@ func (d *Driver) ListLayers() ([]string, error) {
for _, entry := range entries {
id := entry.Name()
switch id {
case linkDir, stagingDir, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
case linkDir, stagingDir, tempDirName, quota.BackingFsBlockDeviceLink, mountProgramFlagFile:
// expected, but not a layer. skip it
continue
default:
@@ -2178,7 +2215,10 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
if err := lock.UnlockAndDelete(); err != nil {
d.stagingDirsLocksMutex.Unlock()
return err
}
}
d.stagingDirsLocksMutex.Unlock()
@@ -2233,7 +2273,7 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
return graphdriver.DriverWithDifferOutput{}, err
}
lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
lock, err := staging_lockfile.TryLockPath(filepath.Join(layerDir, stagingLockFile))
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -2242,13 +2282,14 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
d.stagingDirsLocksMutex.Lock()
delete(d.stagingDirsLocks, layerDir)
d.stagingDirsLocksMutex.Unlock()
lock.Unlock()
if err := lock.UnlockAndDelete(); err != nil {
errRet = errors.Join(errRet, err)
}
}
}()
d.stagingDirsLocksMutex.Lock()
d.stagingDirsLocks[layerDir] = lock
d.stagingDirsLocksMutex.Unlock()
lock.Lock()
logrus.Debugf("Applying differ in %s", applyDir)
@@ -2274,7 +2315,7 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
}
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) (errRet error) {
stagingDirectory := diffOutput.Target
parentStagingDir := filepath.Dir(stagingDirectory)
@@ -2282,7 +2323,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
d.stagingDirsLocksMutex.Lock()
if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok {
delete(d.stagingDirsLocks, parentStagingDir)
lock.Unlock()
if err := lock.UnlockAndDelete(); err != nil {
errRet = errors.Join(errRet, err)
}
}
d.stagingDirsLocksMutex.Unlock()
}()
@@ -2553,12 +2596,20 @@ func (d *Driver) supportsIDmappedMounts() bool {
return false
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS
func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool {
if os.Getenv("_CONTAINERS_OVERLAY_DISABLE_IDMAP") == "yes" {
return false
}
if d.options.mountProgram != "" {
// fuse-overlayfs supports only contiguous mappings, since it performs the mapping on the
// upper layer too, to avoid https://github.com/containers/podman/issues/10272
if !idtools.IsContiguous(uidmap) {
return false
}
if !idtools.IsContiguous(gidmap) {
return false
}
return true
}
return d.supportsIDmappedMounts()

View File

@@ -11,6 +11,7 @@ import (
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/dedup"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
@@ -22,7 +23,10 @@ import (
"github.com/vbatts/tar-split/tar/storage"
)
const defaultPerms = os.FileMode(0o555)
const (
defaultPerms = os.FileMode(0o555)
tempDirName = "tempdirs"
)
func init() {
graphdriver.MustRegister("vfs", Init)
@@ -244,6 +248,42 @@ func (d *Driver) Remove(id string) error {
return system.EnsureRemoveAll(d.dir(id))
}
func (d *Driver) GetTempDirRootDirs() []string {
tempDirs := []string{filepath.Join(d.home, tempDirName)}
// Include imageStore temp directory if it's configured
// Writable layers can only be in d.home or d.imageStore, not in additionalHomes (which are read-only)
if d.imageStore != "" {
tempDirs = append(tempDirs, filepath.Join(d.imageStore, d.String(), tempDirName))
}
return tempDirs
}
// Determine the correct temp directory root based on where the layer actually exists.
func (d *Driver) getTempDirRoot(id string) string {
layerDir := d.dir(id)
if d.imageStore != "" {
expectedLayerDir := filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
if layerDir == expectedLayerDir {
return filepath.Join(d.imageStore, d.String(), tempDirName)
}
}
return filepath.Join(d.home, tempDirName)
}
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
tempDirRoot := d.getTempDirRoot(id)
t, err := tempdir.NewTempDir(tempDirRoot)
if err != nil {
return nil, err
}
layerDir := d.dir(id)
if err := t.StageDeletion(layerDir); err != nil {
return t.Cleanup, err
}
return t.Cleanup, nil
}
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
@@ -312,9 +352,9 @@ func (d *Driver) AdditionalImageStores() []string {
return nil
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
return d.updater.SupportsShifting()
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS
func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool {
return d.updater.SupportsShifting(uidmap, gidmap)
}
// UpdateLayerIDMap updates ID mappings in a from matching the ones specified

View File

@@ -24,6 +24,7 @@ import (
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/fileutils"
@@ -986,8 +987,8 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
return fmt.Errorf("windows doesn't support changing ID mappings")
}
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs in an userNS
func (d *Driver) SupportsShifting() bool {
// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS
func (d *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool {
return false
}
@@ -1014,3 +1015,14 @@ func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) {
}
return &options, nil
}
// DeferredRemove is not implemented.
// It calls Remove directly.
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
return nil, d.Remove(id)
}
// GetTempDirRootDirs is not implemented.
func (d *Driver) GetTempDirRootDirs() []string {
return []string{}
}

View File

@@ -13,6 +13,7 @@ import (
"time"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
@@ -406,6 +407,12 @@ func (d *Driver) Remove(id string) error {
return nil
}
// DeferredRemove is not implemented.
// It calls Remove directly.
func (d *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) {
return nil, d.Remove(id)
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
mountpoint := d.mountPath(id)
@@ -516,3 +523,8 @@ func (d *Driver) AdditionalImageStores() []string {
func (d *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) {
return graphdriver.DedupResult{}, nil
}
// GetTempDirRootDirs is not implemented.
func (d *Driver) GetTempDirRootDirs() []string {
return []string{}
}

View File

@@ -0,0 +1,64 @@
package rawfilelock
import (
"os"
)
type LockType byte
const (
ReadLock LockType = iota
WriteLock
)
type FileHandle = fileHandle
// OpenLock opens a file for locking
// WARNING: This is the underlying file locking primitive of the OS;
// because closing FileHandle releases the lock, it is not suitable for use
// if there is any chance of two concurrent goroutines attempting to use the same lock.
// Most users should use the higher-level operations from internal/staging_lockfile or pkg/lockfile.
func OpenLock(path string, readOnly bool) (FileHandle, error) {
flags := os.O_CREATE
if readOnly {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err := openHandle(path, flags)
if err == nil {
return fd, nil
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// TryLockFile attempts to lock a file handle
func TryLockFile(fd FileHandle, lockType LockType) error {
return lockHandle(fd, lockType, true)
}
// LockFile locks a file handle
func LockFile(fd FileHandle, lockType LockType) error {
return lockHandle(fd, lockType, false)
}
// UnlockAndClose unlocks and closes a file handle
func UnlockAndCloseHandle(fd FileHandle) {
unlockAndCloseHandle(fd)
}
// CloseHandle closes a file handle without unlocking
//
// WARNING: This is a last-resort function for error handling only!
// On Unix systems, closing a file descriptor automatically releases any locks,
// so "closing without unlocking" is impossible. This function will release
// the lock as a side effect of closing the file.
//
// This function should only be used in error paths where the lock state
// is already corrupted or when giving up on lock management entirely.
// Normal code should use UnlockAndCloseHandle instead.
func CloseHandle(fd FileHandle) {
closeHandle(fd)
}

View File

@@ -0,0 +1,49 @@
//go:build !windows
package rawfilelock
import (
"time"
"golang.org/x/sys/unix"
)
type fileHandle uintptr
func openHandle(path string, mode int) (fileHandle, error) {
mode |= unix.O_CLOEXEC
fd, err := unix.Open(path, mode, 0o644)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error {
fType := unix.F_RDLCK
if lType != ReadLock {
fType = unix.F_WRLCK
}
lk := unix.Flock_t{
Type: int16(fType),
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
cmd := unix.F_SETLKW
if nonblocking {
cmd = unix.F_SETLK
}
for {
err := unix.FcntlFlock(uintptr(fd), cmd, &lk)
if err == nil || nonblocking {
return err
}
time.Sleep(10 * time.Millisecond)
}
}
func unlockAndCloseHandle(fd fileHandle) {
unix.Close(int(fd))
}
func closeHandle(fd fileHandle) {
unix.Close(int(fd))
}

View File

@@ -0,0 +1,48 @@
//go:build windows
package rawfilelock
import (
"golang.org/x/sys/windows"
)
const (
reserved = 0
allBytes = ^uint32(0)
)
type fileHandle windows.Handle
func openHandle(path string, mode int) (fileHandle, error) {
mode |= windows.O_CLOEXEC
fd, err := windows.Open(path, mode, windows.S_IWRITE)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error {
flags := 0
if lType != ReadLock {
flags = windows.LOCKFILE_EXCLUSIVE_LOCK
}
if nonblocking {
flags |= windows.LOCKFILE_FAIL_IMMEDIATELY
}
ol := new(windows.Overlapped)
if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
if nonblocking {
return err
}
panic(err)
}
return nil
}
func unlockAndCloseHandle(fd fileHandle) {
ol := new(windows.Overlapped)
windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
closeHandle(fd)
}
func closeHandle(fd fileHandle) {
windows.Close(windows.Handle(fd))
}

View File

@@ -0,0 +1,147 @@
package staging_lockfile
import (
"fmt"
"os"
"path/filepath"
"sync"
"github.com/containers/storage/internal/rawfilelock"
)
// StagingLockFile represents a file lock used to coordinate access to staging areas.
// Typical usage is via CreateAndLock or TryLockPath, both of which return a StagingLockFile
// that must eventually be released with UnlockAndDelete. This ensures that access
// to the staging file is properly synchronized both within and across processes.
//
// WARNING: This struct MUST NOT be created manually. Use the provided helper functions instead.
type StagingLockFile struct {
// Locking invariant: If stagingLockFileLock is not locked, a StagingLockFile for a particular
// path exists if the current process currently owns the lock for that file, and it is recorded in stagingLockFiles.
//
// The following fields can only be accessed by the goroutine owning the lock.
//
// An empty string in the file field means that the lock has been released and the StagingLockFile is no longer valid.
file string // Also the key in stagingLockFiles
fd rawfilelock.FileHandle
}
const maxRetries = 1000
var (
stagingLockFiles map[string]*StagingLockFile
stagingLockFileLock sync.Mutex
)
// tryAcquireLockForFile attempts to acquire a lock for the specified file path.
func tryAcquireLockForFile(path string) (*StagingLockFile, error) {
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err)
}
stagingLockFileLock.Lock()
defer stagingLockFileLock.Unlock()
if stagingLockFiles == nil {
stagingLockFiles = make(map[string]*StagingLockFile)
}
if _, ok := stagingLockFiles[cleanPath]; ok {
return nil, fmt.Errorf("lock %q is used already with other thread", cleanPath)
}
fd, err := rawfilelock.OpenLock(cleanPath, false)
if err != nil {
return nil, err
}
if err = rawfilelock.TryLockFile(fd, rawfilelock.WriteLock); err != nil {
// Lock acquisition failed, but holding stagingLockFileLock ensures
// no other goroutine in this process could have obtained a lock for this file,
// so closing it is still safe.
rawfilelock.CloseHandle(fd)
return nil, fmt.Errorf("failed to acquire lock on %q: %w", cleanPath, err)
}
lockFile := &StagingLockFile{
file: cleanPath,
fd: fd,
}
stagingLockFiles[cleanPath] = lockFile
return lockFile, nil
}
// UnlockAndDelete releases the lock, removes the associated file from the filesystem.
//
// WARNING: After this operation, the StagingLockFile becomes invalid for further use.
func (l *StagingLockFile) UnlockAndDelete() error {
stagingLockFileLock.Lock()
defer stagingLockFileLock.Unlock()
if l.file == "" {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
defer func() {
// Its important that this happens while we are still holding stagingLockFileLock, to ensure
// that no other goroutine has l.file open = that this close is not unlocking the lock under any
// other goroutine. (defer ordering is LIFO, so this will happen before we release the stagingLockFileLock)
rawfilelock.UnlockAndCloseHandle(l.fd)
delete(stagingLockFiles, l.file)
l.file = ""
}()
if err := os.Remove(l.file); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// CreateAndLock creates a new temporary file in the specified directory with the given pattern,
// then creates and locks a StagingLockFile for it. The file is created using os.CreateTemp.
// Typically, the caller would use the returned lock file path to derive a path to the lock-controlled resource
// (e.g. by replacing the "pattern" part of the returned file name with a different prefix)
// Caller MUST call UnlockAndDelete() on the returned StagingLockFile to release the lock and delete the file.
//
// Returns:
// - The locked StagingLockFile
// - The name of created lock file
// - Any error that occurred during the process
//
// If the file cannot be locked, this function will retry up to maxRetries times before failing.
func CreateAndLock(dir string, pattern string) (*StagingLockFile, string, error) {
for try := 0; ; try++ {
file, err := os.CreateTemp(dir, pattern)
if err != nil {
return nil, "", err
}
file.Close()
path := file.Name()
l, err := tryAcquireLockForFile(path)
if err != nil {
if try < maxRetries {
continue // Retry if the lock cannot be acquired
}
return nil, "", fmt.Errorf(
"failed to allocate lock in %q after %d attempts; last failure on %q: %w",
dir, try, filepath.Base(path), err,
)
}
return l, filepath.Base(path), nil
}
}
// TryLockPath attempts to acquire a lock on an specific path. If the file does not exist,
// it will be created.
//
// Warning: If acquiring a lock is successful, it returns a new StagingLockFile
// instance for the file. Caller MUST call UnlockAndDelete() on the returned StagingLockFile
// to release the lock and delete the file.
func TryLockPath(path string) (*StagingLockFile, error) {
return tryAcquireLockForFile(path)
}

View File

@@ -0,0 +1,243 @@
package tempdir
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/storage/internal/staging_lockfile"
"github.com/sirupsen/logrus"
)
/*
Locking rules and invariants for TempDir and its recovery mechanism:
1. TempDir Instance Locks:
- Path: 'RootDir/lock-XYZ' (in the root directory)
- Each TempDir instance creates and holds an exclusive lock on this file immediately
during NewTempDir() initialization.
- This lock signifies that the temporary directory is in active use by the
process/goroutine that holds the TempDir object.
2. Stale Directory Recovery (separate operation):
- RecoverStaleDirs() can be called independently to identify and clean up stale
temporary directories.
- For each potential stale directory (found by listPotentialStaleDirs), it
attempts to TryLockPath() its instance lock file.
- If TryLockPath() succeeds: The directory is considered stale, and both the
directory and lock file are removed.
- If TryLockPath() fails: The directory is considered in active use by another
process/goroutine, and it's skipped.
3. TempDir Usage:
- NewTempDir() immediately creates both the instance lock and the temporary directory.
- TempDir.StageDeletion() moves files into the existing temporary directory with counter-based naming.
- Files moved into the temporary directory are renamed with a counter-based prefix
to ensure uniqueness (e.g., "0-filename", "1-filename").
- Once cleaned up, the TempDir instance cannot be reused - StageDeletion() will return an error.
4. Cleanup Process:
- TempDir.Cleanup() removes both the temporary directory and its lock file.
- The instance lock is unlocked and deleted after cleanup operations are complete.
- The TempDir instance becomes inactive after cleanup (internal fields are reset).
- The TempDir instance cannot be reused after Cleanup() - StageDeletion() will fail.
5. TempDir Lifetime:
- NewTempDir() creates both the TempDir manager and the actual temporary directory immediately.
- The temporary directory is created eagerly during NewTempDir().
- During its lifetime, the temporary directory is protected by its instance lock.
- The temporary directory exists until Cleanup() is called, which removes both
the directory and its lock file.
- Multiple TempDir instances can coexist in the same RootDir, each with its own
unique subdirectory and lock.
- After cleanup, the TempDir instance cannot be reused.
6. Example Directory Structure:
RootDir/
lock-ABC (instance lock for temp-dir-ABC)
temp-dir-ABC/
0-file1
1-file3
lock-XYZ (instance lock for temp-dir-XYZ)
temp-dir-XYZ/
0-file2
*/
const (
// tempDirPrefix is the prefix used for creating temporary directories.
tempDirPrefix = "temp-dir-"
// tempdirLockPrefix is the prefix used for creating lock files for temporary directories.
tempdirLockPrefix = "lock-"
)
// TempDir represents a temporary directory that is created in a specified root directory.
// It manages the lifecycle of the temporary directory, including creation, locking, and cleanup.
// Each TempDir instance is associated with a unique subdirectory in the root directory.
// Warning: The TempDir instance should be used in a single goroutine.
type TempDir struct {
RootDir string
tempDirPath string
// tempDirLock is a lock file (e.g., RootDir/lock-XYZ) specific to this
// TempDir instance, indicating it's in active use.
tempDirLock *staging_lockfile.StagingLockFile
tempDirLockPath string
// counter is used to generate unique filenames for added files.
counter uint64
}
// CleanupTempDirFunc is a function type that can be returned by operations
// which need to perform cleanup actions later.
type CleanupTempDirFunc func() error
// listPotentialStaleDirs scans the RootDir for directories that might be stale temporary directories.
// It identifies directories with the tempDirPrefix and their corresponding lock files with the tempdirLockPrefix.
// The function returns a map of IDs that correspond to both directories and lock files found.
// These IDs are extracted from the filenames by removing their respective prefixes.
func listPotentialStaleDirs(rootDir string) (map[string]struct{}, error) {
ids := make(map[string]struct{})
dirContent, err := os.ReadDir(rootDir)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, fmt.Errorf("error reading temp dir %s: %w", rootDir, err)
}
for _, entry := range dirContent {
if id, ok := strings.CutPrefix(entry.Name(), tempDirPrefix); ok {
ids[id] = struct{}{}
continue
}
if id, ok := strings.CutPrefix(entry.Name(), tempdirLockPrefix); ok {
ids[id] = struct{}{}
}
}
return ids, nil
}
// RecoverStaleDirs identifies and removes stale temporary directories in the root directory.
// A directory is considered stale if its lock file can be acquired (indicating no active use).
// The function attempts to remove both the directory and its lock file.
// If a directory's lock cannot be acquired, it is considered in use and is skipped.
func RecoverStaleDirs(rootDir string) error {
potentialStaleDirs, err := listPotentialStaleDirs(rootDir)
if err != nil {
return fmt.Errorf("error listing potential stale temp dirs in %s: %w", rootDir, err)
}
if len(potentialStaleDirs) == 0 {
return nil
}
var recoveryErrors []error
for id := range potentialStaleDirs {
lockPath := filepath.Join(rootDir, tempdirLockPrefix+id)
tempDirPath := filepath.Join(rootDir, tempDirPrefix+id)
// Try to lock the lock file. If it can be locked, the directory is stale.
instanceLock, err := staging_lockfile.TryLockPath(lockPath)
if err != nil {
continue
}
if rmErr := os.RemoveAll(tempDirPath); rmErr != nil && !os.IsNotExist(rmErr) {
recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir %s: %w", tempDirPath, rmErr))
}
if unlockErr := instanceLock.UnlockAndDelete(); unlockErr != nil {
recoveryErrors = append(recoveryErrors, fmt.Errorf("error unlocking and deleting stale lock file %s: %w", lockPath, unlockErr))
}
}
return errors.Join(recoveryErrors...)
}
// NewTempDir creates a TempDir and immediately creates both the temporary directory
// and its corresponding lock file in the specified RootDir.
// The RootDir itself will be created if it doesn't exist.
// Note: The caller MUST ensure that returned TempDir instance is cleaned up with .Cleanup().
func NewTempDir(rootDir string) (*TempDir, error) {
if err := os.MkdirAll(rootDir, 0o700); err != nil {
return nil, fmt.Errorf("creating root temp directory %s failed: %w", rootDir, err)
}
td := &TempDir{
RootDir: rootDir,
}
tempDirLock, tempDirLockFileName, err := staging_lockfile.CreateAndLock(td.RootDir, tempdirLockPrefix)
if err != nil {
return nil, fmt.Errorf("creating and locking temp dir instance lock in %s failed: %w", td.RootDir, err)
}
td.tempDirLock = tempDirLock
td.tempDirLockPath = filepath.Join(td.RootDir, tempDirLockFileName)
// Create the temporary directory that corresponds to the lock file
id := strings.TrimPrefix(tempDirLockFileName, tempdirLockPrefix)
actualTempDirPath := filepath.Join(td.RootDir, tempDirPrefix+id)
if err := os.MkdirAll(actualTempDirPath, 0o700); err != nil {
return nil, fmt.Errorf("creating temp directory %s failed: %w", actualTempDirPath, err)
}
td.tempDirPath = actualTempDirPath
td.counter = 0
return td, nil
}
// StageDeletion moves the specified file into the instance's temporary directory.
// The temporary directory must already exist (created during NewTempDir).
// Files are renamed with a counter-based prefix (e.g., "0-filename", "1-filename") to ensure uniqueness.
// Note: 'path' must be on the same filesystem as the TempDir for os.Rename to work.
// The caller MUST ensure .Cleanup() is called.
// If the TempDir has been cleaned up, this method will return an error.
func (td *TempDir) StageDeletion(path string) error {
if td.tempDirLock == nil {
return fmt.Errorf("temp dir instance not initialized or already cleaned up")
}
fileName := fmt.Sprintf("%d-", td.counter) + filepath.Base(path)
destPath := filepath.Join(td.tempDirPath, fileName)
td.counter++
return os.Rename(path, destPath)
}
// Cleanup removes the temporary directory and releases its instance lock.
// After cleanup, the TempDir instance becomes inactive and cannot be reused.
// Subsequent calls to StageDeletion() will fail.
// Multiple calls to Cleanup() are safe and will not return an error.
// Callers should typically defer Cleanup() to run after any application-level
// global locks are released to avoid holding those locks during potentially
// slow disk I/O.
func (td *TempDir) Cleanup() error {
if td.tempDirLock == nil {
logrus.Debug("Temp dir already cleaned up")
return nil
}
if err := os.RemoveAll(td.tempDirPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("removing temp dir %s failed: %w", td.tempDirPath, err)
}
lock := td.tempDirLock
td.tempDirPath = ""
td.tempDirLock = nil
td.tempDirLockPath = ""
return lock.UnlockAndDelete()
}
// CleanupTemporaryDirectories cleans up multiple temporary directories by calling their cleanup functions.
func CleanupTemporaryDirectories(cleanFuncs ...CleanupTempDirFunc) error {
var cleanupErrors []error
for _, cleanupFunc := range cleanFuncs {
if cleanupFunc == nil {
continue
}
if err := cleanupFunc(); err != nil {
cleanupErrors = append(cleanupErrors, err)
}
}
return errors.Join(cleanupErrors...)
}

View File

@@ -18,6 +18,7 @@ import (
"time"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/internal/tempdir"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
@@ -38,6 +39,8 @@ import (
const (
tarSplitSuffix = ".tar-split.gz"
// tempDirPath is the subdirectory name used for storing temporary directories during layer deletion
tempDirPath = "tmp"
incompleteFlag = "incomplete"
// maxLayerStoreCleanupIterations is the number of times we try to clean up inconsistent layer store state
// in readers (which, for implementation reasons, gives other writers the opportunity to create more inconsistent state)
@@ -290,8 +293,14 @@ type rwLayerStore interface {
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
// Delete deletes a layer with the specified name or ID.
Delete(id string) error
// deleteWhileHoldingLock deletes a layer with the specified name or ID.
deleteWhileHoldingLock(id string) error
// deferredDelete deletes a layer with the specified name or ID.
// This removal happens immediately (the layer is no longer usable),
// but physically deleting the files may be deferred.
// Caller MUST call all returned cleanup functions outside of the locks.
deferredDelete(id string) ([]tempdir.CleanupTempDirFunc, error)
// Wipe deletes all layers.
Wipe() error
@@ -794,6 +803,17 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
layers := []*Layer{}
ids := make(map[string]*Layer)
if r.lockfile.IsReadWrite() {
if err := tempdir.RecoverStaleDirs(filepath.Join(r.layerdir, tempDirPath)); err != nil {
return false, err
}
for _, driverTempDirPath := range r.driver.GetTempDirRootDirs() {
if err := tempdir.RecoverStaleDirs(driverTempDirPath); err != nil {
return false, err
}
}
}
for locationIndex := range numLayerLocationIndex {
location := layerLocationFromIndex(locationIndex)
rpath := r.jsonPath[locationIndex]
@@ -935,7 +955,12 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
// Now actually delete the layers
for _, layer := range layersToDelete {
logrus.Warnf("Found incomplete layer %q, deleting it", layer.ID)
err := r.deleteInternal(layer.ID)
cleanFunctions, err := r.internalDelete(layer.ID)
defer func() {
if err := tempdir.CleanupTemporaryDirectories(cleanFunctions...); err != nil {
logrus.Errorf("Error cleaning up temporary directories: %v", err)
}
}()
if err != nil {
// Don't return the error immediately, because deleteInternal does not saveLayers();
// Even if deleting one incomplete layer fails, call saveLayers() so that other possible successfully
@@ -1334,7 +1359,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
if err := r.saveFor(layer); err != nil {
if e := r.Delete(layer.ID); e != nil {
if e := r.deleteWhileHoldingLock(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
}
return nil, err
@@ -1469,7 +1494,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
if e := r.Delete(id); e != nil {
if e := r.deleteWhileHoldingLock(id); e != nil {
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
}
}
@@ -1634,7 +1659,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
options.MountLabel = layer.MountLabel
}
if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting() {
if (options.UidMaps != nil || options.GidMaps != nil) && !r.driver.SupportsShifting(options.UidMaps, options.GidMaps) {
if !reflect.DeepEqual(options.UidMaps, layer.UIDMap) || !reflect.DeepEqual(options.GidMaps, layer.GIDMap) {
return "", fmt.Errorf("cannot mount layer %v: shifting not enabled", layer.ID)
}
@@ -1920,13 +1945,15 @@ func layerHasIncompleteFlag(layer *Layer) bool {
}
// Requires startWriting.
func (r *layerStore) deleteInternal(id string) error {
// Caller MUST run all returned cleanup functions after this, EVEN IF the function returns an error.
// Ideally outside of the startWriting.
func (r *layerStore) internalDelete(id string) ([]tempdir.CleanupTempDirFunc, error) {
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
return nil, fmt.Errorf("not allowed to delete layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
return nil, ErrLayerUnknown
}
// Ensure that if we are interrupted, the layer will be cleaned up.
if !layerHasIncompleteFlag(layer) {
@@ -1935,16 +1962,30 @@ func (r *layerStore) deleteInternal(id string) error {
}
layer.Flags[incompleteFlag] = true
if err := r.saveFor(layer); err != nil {
return err
return nil, err
}
}
// We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
tempDirectory, err := tempdir.NewTempDir(filepath.Join(r.layerdir, tempDirPath))
cleanFunctions := []tempdir.CleanupTempDirFunc{}
cleanFunctions = append(cleanFunctions, tempDirectory.Cleanup)
if err != nil {
return nil, err
}
id = layer.ID
cleanFunc, err := r.driver.DeferredRemove(id)
cleanFunctions = append(cleanFunctions, cleanFunc)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return cleanFunctions, err
}
if err := tempDirectory.StageDeletion(r.tspath(id)); err != nil && !errors.Is(err, os.ErrNotExist) {
return cleanFunctions, err
}
if err := tempDirectory.StageDeletion(r.datadir(id)); err != nil && !errors.Is(err, os.ErrNotExist) {
return cleanFunctions, err
}
os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id))
delete(r.byid, id)
for _, name := range layer.Names {
delete(r.byname, name)
@@ -1968,7 +2009,7 @@ func (r *layerStore) deleteInternal(id string) error {
}) {
selinux.ReleaseLabel(mountLabel)
}
return nil
return cleanFunctions, nil
}
// Requires startWriting.
@@ -1988,10 +2029,20 @@ func (r *layerStore) deleteInDigestMap(id string) {
}
// Requires startWriting.
func (r *layerStore) Delete(id string) error {
// This is soft-deprecated and should not have any new callers; use deferredDelete instead.
func (r *layerStore) deleteWhileHoldingLock(id string) error {
cleanupFunctions, deferErr := r.deferredDelete(id)
cleanupErr := tempdir.CleanupTemporaryDirectories(cleanupFunctions...)
return errors.Join(deferErr, cleanupErr)
}
// Requires startWriting.
// Caller MUST run all returned cleanup functions after this, EVEN IF the function returns an error.
// Ideally outside of the startWriting.
func (r *layerStore) deferredDelete(id string) ([]tempdir.CleanupTempDirFunc, error) {
layer, ok := r.lookup(id)
if !ok {
return ErrLayerUnknown
return nil, ErrLayerUnknown
}
id = layer.ID
// The layer may already have been explicitly unmounted, but if not, we
@@ -2003,13 +2054,14 @@ func (r *layerStore) Delete(id string) error {
break
}
if err != nil {
return err
return nil, err
}
}
if err := r.deleteInternal(id); err != nil {
return err
cleanFunctions, err := r.internalDelete(id)
if err != nil {
return cleanFunctions, err
}
return r.saveFor(layer)
return cleanFunctions, r.saveFor(layer)
}
// Requires startReading or startWriting.
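// Editor's illustrative sketch (not part of the diff): the intended call
// pattern for deferredDelete is to collect the cleanup functions while
// holding the write lock and run them only after it is released. The
// startWriting/stopWriting calls stand in for the store's real locking
// helpers and are assumptions here:
//
//	r.startWriting()
//	cleanups, err := r.deferredDelete(id)
//	r.stopWriting()
//	cleanupErr := tempdir.CleanupTemporaryDirectories(cleanups...)
//	return errors.Join(err, cleanupErr)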
@@ -2039,7 +2091,7 @@ func (r *layerStore) Wipe() error {
return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
})
for _, id := range ids {
if err := r.Delete(id); err != nil {
if err := r.deleteWhileHoldingLock(id); err != nil {
return err
}
}
@@ -2550,10 +2602,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
}
if _, err := diffOutput.TarSplit.Seek(0, io.SeekStart); err != nil {
return err
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
if _, err := diffOutput.TarSplit.WriteTo(compressor); err != nil {
compressor.Close()
return err
}
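// Editor's illustrative sketch (not part of the diff): the hunk above
// reflects TarSplit changing from an in-memory value to a seekable
// stream, so it must be rewound and streamed into the compressor rather
// than passed to a single Write call. The general shape, with a
// hypothetical source:
//
//	var src io.ReadSeeker = openTarSplit() // hypothetical
//	if _, err := src.Seek(0, io.SeekStart); err != nil {
//		return err
//	}
//	if _, err := io.Copy(compressor, src); err != nil {
//		return err
//	}
//
// When the concrete type also implements io.WriterTo, WriteTo is the
// optimized equivalent of io.Copy used in the hunk.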
@@ -2567,7 +2623,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
}
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
if err2 := r.Delete(id); err2 != nil {
if err2 := r.deleteWhileHoldingLock(id); err2 != nil {
logrus.Errorf("While recovering from a failure to set big data, error deleting layer %#v: %v", id, err2)
}
return err
