1
0
mirror of https://github.com/openshift/installer.git synced 2026-02-05 06:46:36 +01:00

add integration test (plus required test infrastructure) for node-joiner

This commit is contained in:
Andrea Fasano
2024-09-06 11:48:51 -04:00
parent 43cedf23ed
commit 2ff7ccc89c
20 changed files with 4232 additions and 486 deletions

View File

@@ -14,6 +14,12 @@ import (
)
// main is the node-joiner entry point; any error terminates the process
// with a non-zero exit code via logrus.Fatal.
func main() {
	err := nodeJoiner()
	if err != nil {
		logrus.Fatal(err)
	}
}
func nodeJoiner() error {
nodesAddCmd := &cobra.Command{
Use: "add-nodes",
Short: "Generates an ISO that could be used to boot the configured nodes to let them join an existing cluster",
@@ -63,9 +69,8 @@ func main() {
rootCmd.AddCommand(nodesAddCmd)
rootCmd.AddCommand(nodesMonitorCmd)
if err := rootCmd.Execute(); err != nil {
logrus.Fatal(err)
}
return rootCmd.Execute()
}
func runRootCmd(cmd *cobra.Command, args []string) {

View File

@@ -0,0 +1,254 @@
package main
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/go-logr/logr"
"github.com/rogpeppe/go-internal/testscript"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/yaml"
v1 "github.com/openshift/api/config/v1"
"github.com/openshift/installer/internal/tshelpers"
)
const (
testResourcesFolder = "setup"
)
// TestMain wires the test binary into testscript, registering a "node-joiner"
// command so the testdata scripts can invoke the tool directly.
func TestMain(m *testing.M) {
	// Silence controller-runtime logging while the tests run.
	log.SetLogger(logr.Logger{})

	commands := map[string]func() int{
		"node-joiner": func() int {
			err := nodeJoiner()
			if err != nil {
				return 1
			}
			return 0
		},
	}
	os.Exit(testscript.RunMain(m, commands))
}
// TestNodeJoinerIntegration runs the node-joiner testscript scenarios found
// under testdata against a temporary envtest cluster and a fake OCP release
// registry. The test is skipped in -short mode.
func TestNodeJoinerIntegration(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping integration test")
	}

	projectDir, err := os.Getwd()
	assert.NoError(t, err)

	testscript.Run(t, testscript.Params{
		Dir: "testdata",
		// Uncomment below line to help debug the testcases
		// TestWork: true,
		Deadline: time.Now().Add(10 * time.Minute),
		Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){
			"isoIgnitionContains": tshelpers.IsoIgnitionContains,
		},
		Setup: func(e *testscript.Env) error {
			// This is required for loading properly the embedded resources.
			e.Cd = filepath.Join(projectDir, "../../data")
			// Set the home dir within the test temporary working directory.
			homeDir := filepath.Join(e.WorkDir, "home")
			if err := os.Mkdir(homeDir, 0777); err != nil {
				return err
			}
			for i, v := range e.Vars {
				if v == "HOME=/no-home" {
					e.Vars[i] = fmt.Sprintf("HOME=%s", homeDir)
					break
				}
			}
			// Create the fake registry.
			fakeRegistry := tshelpers.NewFakeOCPRegistry()
			// Creates a new temporary cluster.
			testEnv := &envtest.Environment{
				CRDDirectoryPaths: []string{
					// Preload OpenShift specific CRDs.
					filepath.Join(projectDir, "testdata", "setup", "crds"),
				},
				ErrorIfCRDPathMissing: true,
				// Uncomment the following line if you wish to run the test without
				// using the hack/go-integration-test-nodejoiner.sh script.
				// BinaryAssetsDirectory: "/tmp/k8s/1.31.0-linux-amd64",
			}
			// Ensures they are cleaned up on test completion.
			e.Defer(func() {
				// Best-effort teardown; errors are deliberately ignored here
				// since the test is already finishing.
				testEnv.Stop()
				fakeRegistry.Close()
			})
			// Starts the registry and cluster. Errors are scoped locally:
			// the previous code assigned to the `err` captured from the
			// enclosing test function, which was inconsistent with the
			// shadowing `:=` below and easy to break accidentally.
			if err := fakeRegistry.Start(); err != nil {
				return err
			}
			config, err := testEnv.Start()
			if err != nil {
				return err
			}
			// Creates a valid kubeconfig and store it in the test temporary working dir,
			// so that it could be used by the node-joiner.
			if err := createKubeConfig(config, e.WorkDir); err != nil {
				return err
			}
			// TEST_IMAGE env var will be used to replace the OCP release reference in the
			// yaml setup files, so that the one exposed by the fake registry will be used.
			e.Setenv("TEST_IMAGE", fakeRegistry.ReleasePullspec())
			// Setup global resources required for any tests.
			if err := setupInitialResources(testEnv.Config, filepath.Join(projectDir, "testdata", "setup", "default"), e.Vars); err != nil {
				return err
			}
			// Setup test specific resources (defined in the $WORK/setup folder).
			return setupInitialResources(testEnv.Config, filepath.Join(e.WorkDir, testResourcesFolder), e.Vars)
		},
	})
}
// setupInitialResources creates, in the cluster pointed at by config, every
// resource defined by the yaml/yml manifests found in setupPath. Before each
// manifest is decoded, occurrences of $NAME are expanded using the name=value
// pairs in envArgs. If a manifest defines a status section, the resource
// status subresource is updated as well (envtest does not set it on Create).
// A missing setupPath folder is not an error: it means nothing to create.
func setupInitialResources(config *rest.Config, setupPath string, envArgs []string) error {
	files, err := os.ReadDir(setupPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	csDynamic, err := dynamic.NewForConfig(config)
	if err != nil {
		return err
	}
	// For any valid yaml file, create the related resource.
	for _, f := range files {
		fName := filepath.Join(setupPath, f.Name())
		if filepath.Ext(fName) != ".yaml" && filepath.Ext(fName) != ".yml" {
			continue
		}
		data, err := os.ReadFile(fName)
		if err != nil {
			return err
		}
		// Env vars expansion. Split only on the first '=' so values that
		// themselves contain '=' (e.g. base64 encoded tokens) survive intact,
		// and skip malformed entries without a separator (the previous
		// strings.Split code truncated such values and panicked on entries
		// lacking any '=').
		for _, ev := range envArgs {
			varName, varValue, found := strings.Cut(ev, "=")
			if !found {
				continue
			}
			data = bytes.ReplaceAll(data, []byte("$"+varName), []byte(varValue))
		}
		obj := &unstructured.Unstructured{}
		if err := yaml.Unmarshal(data, obj); err != nil {
			return fmt.Errorf("%s: %w", fName, err)
		}
		gvr, err := getGVR(obj)
		if err != nil {
			return fmt.Errorf("getting resource gvr from %s: %w", fName, err)
		}
		updObj, err := csDynamic.Resource(gvr).Namespace(obj.GetNamespace()).Create(context.Background(), obj, metav1.CreateOptions{})
		if err != nil {
			return fmt.Errorf("creating resource from %s: %w", fName, err)
		}
		// Take care of a resource status, in case it was configured.
		if status, ok := obj.Object["status"]; ok {
			updObj.Object["status"] = status
			_, err = csDynamic.Resource(gvr).Namespace(obj.GetNamespace()).UpdateStatus(context.Background(), updObj, metav1.UpdateOptions{})
			if err != nil {
				return fmt.Errorf("updating resource status from %s: %w", fName, err)
			}
		}
	}
	return nil
}
// getGVR maps the kind of the given unstructured object to the
// GroupVersionResource required by the dynamic client to create it. Only the
// kinds used by the integration-test fixtures are supported; any other kind
// yields an error.
func getGVR(obj *unstructured.Unstructured) (schema.GroupVersionResource, error) {
	var gvr schema.GroupVersionResource
	var err error

	kind := obj.GetKind()
	switch kind {
	case "ClusterVersion":
		gvr = v1.SchemeGroupVersion.WithResource("clusterversions")
	case "Infrastructure":
		// Use SchemeGroupVersion for consistency with the other
		// config.openshift.io cases (v1.GroupVersion refers to the same
		// group/version, but mixing the two aliases was confusing).
		gvr = v1.SchemeGroupVersion.WithResource("infrastructures")
	case "Proxy":
		gvr = v1.SchemeGroupVersion.WithResource("proxies")
	case "Namespace":
		gvr = corev1.SchemeGroupVersion.WithResource("namespaces")
	case "Secret":
		gvr = corev1.SchemeGroupVersion.WithResource("secrets")
	case "Node":
		gvr = corev1.SchemeGroupVersion.WithResource("nodes")
	case "ConfigMap":
		gvr = corev1.SchemeGroupVersion.WithResource("configmaps")
	default:
		err = fmt.Errorf("unsupported object kind: %s", kind)
	}
	return gvr, err
}
// createKubeConfig builds a kubeconfig for the envtest cluster described by
// config (server address, CA, and client certificate/key) and writes it to
// destPath/kubeconfig, so the node-joiner can consume it.
func createKubeConfig(config *rest.Config, destPath string) error {
	const (
		clusterName = "nodejoiner-cluster"
		contextName = "nodejoiner-context"
		userName    = "nodejoiner-user"
	)

	kubeConfig := clientcmdapi.Config{
		Kind:       "Config",
		APIVersion: "v1",
		Clusters: map[string]*clientcmdapi.Cluster{
			clusterName: {
				Server:                   config.Host,
				CertificateAuthorityData: config.CAData,
			},
		},
		Contexts: map[string]*clientcmdapi.Context{
			contextName: {
				Cluster:  clusterName,
				AuthInfo: userName,
			},
		},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{
			userName: {
				ClientCertificateData: config.CertData,
				ClientKeyData:         config.KeyData,
			},
		},
		CurrentContext: contextName,
	}

	return clientcmd.WriteToFile(kubeConfig, filepath.Join(destPath, "kubeconfig"))
}

15
cmd/node-joiner/testdata/add-nodes.txt vendored Normal file
View File

@@ -0,0 +1,15 @@
# Verify that the add-nodes command correctly generates the ISO image
node-joiner add-nodes --kubeconfig=$WORK/kubeconfig --log-level=debug --dir=$WORK
exists $WORK/node.x86_64.iso
isoIgnitionContains node.x86_64.iso /etc/assisted/add-nodes.env
isoIgnitionContains node.x86_64.iso /usr/local/bin/add-node.sh
-- nodes-config.yaml --
hosts:
- hostname: extra-worker-0
interfaces:
- name: eth0
macAddress: 00:f4:3d:a0:0e:2b

View File

@@ -0,0 +1,719 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.openshift.io: https://github.com/openshift/api/pull/495
api.openshift.io/merged-by-featuregates: "true"
include.release.openshift.io/self-managed-high-availability: "true"
release.openshift.io/feature-set: Default
name: clusterversions.config.openshift.io
spec:
group: config.openshift.io
names:
kind: ClusterVersion
listKind: ClusterVersionList
plural: clusterversions
singular: clusterversion
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .status.history[?(@.state=="Completed")].version
name: Version
type: string
- jsonPath: .status.conditions[?(@.type=="Available")].status
name: Available
type: string
- jsonPath: .status.conditions[?(@.type=="Progressing")].status
name: Progressing
type: string
- jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime
name: Since
type: date
- jsonPath: .status.conditions[?(@.type=="Progressing")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: "ClusterVersion is the configuration for the ClusterVersionOperator.
This is where parameters related to automatic updates can be set. \n Compatibility
level 1: Stable within a major release for a minimum of 12 months or 3 minor
releases (whichever is longer)."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: spec is the desired state of the cluster version - the operator
will work to ensure that the desired version is applied to the cluster.
properties:
capabilities:
description: capabilities configures the installation of optional,
core cluster components. A null value here is identical to an empty
object; see the child properties for default semantics.
properties:
additionalEnabledCapabilities:
description: additionalEnabledCapabilities extends the set of
managed capabilities beyond the baseline defined in baselineCapabilitySet. The
default is an empty set.
items:
description: ClusterVersionCapability enumerates optional, core
cluster components.
enum:
- openshift-samples
- baremetal
- marketplace
- Console
- Insights
- Storage
- CSISnapshot
- NodeTuning
- MachineAPI
- Build
- DeploymentConfig
- ImageRegistry
- OperatorLifecycleManager
- CloudCredential
- Ingress
- CloudControllerManager
type: string
type: array
x-kubernetes-list-type: atomic
baselineCapabilitySet:
description: baselineCapabilitySet selects an initial set of optional
capabilities to enable, which can be extended via additionalEnabledCapabilities. If
unset, the cluster will choose a default, and the default may
change over time. The current default is vCurrent.
enum:
- None
- v4.11
- v4.12
- v4.13
- v4.14
- v4.15
- v4.16
- vCurrent
type: string
type: object
channel:
description: channel is an identifier for explicitly requesting that
a non-default set of updates be applied to this cluster. The default
channel will contain stable updates that are appropriate for
production clusters.
type: string
clusterID:
description: clusterID uniquely identifies this cluster. This is expected
to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
in hexadecimal values). This is a required field.
type: string
desiredUpdate:
description: "desiredUpdate is an optional field that indicates the
desired value of the cluster version. Setting this value will trigger
an upgrade (if the current version does not match the desired version).
The set of recommended update values is listed as part of available
updates in status, and setting values outside that range may cause
the upgrade to fail. \n Some of the fields are inter-related with
restrictions and meanings described here. 1. image is specified,
version is specified, architecture is specified. API validation
error. 2. image is specified, version is specified, architecture
is not specified. You should not do this. version is silently ignored
and image is used. 3. image is specified, version is not specified,
architecture is specified. API validation error. 4. image is specified,
version is not specified, architecture is not specified. image is
used. 5. image is not specified, version is specified, architecture
is specified. version and desired architecture are used to select
an image. 6. image is not specified, version is specified, architecture
is not specified. version and current architecture are used to select
an image. 7. image is not specified, version is not specified, architecture
is specified. API validation error. 8. image is not specified, version
is not specified, architecture is not specified. API validation
error. \n If an upgrade fails the operator will halt and report
status about the failing component. Setting the desired update value
back to the previous version will cause a rollback to be attempted.
Not all rollbacks will succeed."
properties:
architecture:
description: architecture is an optional field that indicates
the desired value of the cluster architecture. In this context
cluster architecture means either a single architecture or a
multi architecture. architecture can only be set to Multi thereby
only allowing updates from single to multi architecture. If
architecture is set, image cannot be set and version must be
set. Valid values are 'Multi' and empty.
enum:
- Multi
- ""
type: string
force:
description: force allows an administrator to update to an image
that has failed verification or upgradeable checks. This option
should only be used when the authenticity of the provided image
has been verified out of band because the provided image will
run with full administrative access to the cluster. Do not use
this flag with images that comes from unknown or potentially
malicious sources.
type: boolean
image:
description: image is a container image location that contains
the update. image should be used when the desired version does
not exist in availableUpdates or history. When image is set,
version is ignored. When image is set, version should be empty.
When image is set, architecture cannot be specified.
type: string
version:
description: version is a semantic version identifying the update
version. version is ignored if image is specified and required
if architecture is specified.
type: string
type: object
x-kubernetes-validations:
- message: cannot set both Architecture and Image
rule: 'has(self.architecture) && has(self.image) ? (self.architecture
== '''' || self.image == '''') : true'
- message: Version must be set if Architecture is set
rule: 'has(self.architecture) && self.architecture != '''' ? self.version
!= '''' : true'
overrides:
description: overrides is a list of overrides for components that are
managed by cluster version operator. Marking a component unmanaged
will prevent the operator from creating or updating the object.
items:
description: ComponentOverride allows overriding cluster version
operator's behavior for a component.
properties:
group:
description: group identifies the API group that the kind is
in.
type: string
kind:
description: kind identifies which object to override.
type: string
name:
description: name is the component's name.
type: string
namespace:
description: namespace is the component's namespace. If the
resource is cluster scoped, the namespace should be empty.
type: string
unmanaged:
description: 'unmanaged controls if cluster version operator
should stop managing the resources in this cluster. Default:
false'
type: boolean
required:
- group
- kind
- name
- namespace
- unmanaged
type: object
type: array
x-kubernetes-list-map-keys:
- kind
- group
- namespace
- name
x-kubernetes-list-type: map
upstream:
description: upstream may be used to specify the preferred update
server. By default it will use the appropriate update server for
the cluster and region.
type: string
required:
- clusterID
type: object
status:
description: status contains information about the available updates and
any in-progress updates.
properties:
availableUpdates:
description: availableUpdates contains updates recommended for this
cluster. Updates which appear in conditionalUpdates but not in availableUpdates
may expose this cluster to known issues. This list may be empty
if no updates are recommended, if the update service is unavailable,
or if an invalid channel has been specified.
items:
description: Release represents an OpenShift release image and associated
metadata.
properties:
channels:
description: channels is the set of Cincinnati channels to which
the release currently belongs.
items:
type: string
type: array
x-kubernetes-list-type: set
image:
description: image is a container image location that contains
the update. When this field is part of spec, image is optional
if version is specified and the availableUpdates field contains
a matching version.
type: string
url:
description: url contains information about this release. This
URL is set by the 'url' metadata property on a release or
the metadata returned by the update API and should be displayed
as a link in user interfaces. The URL field may not be set
for test or nightly releases.
type: string
version:
description: version is a semantic version identifying the update
version. When this field is part of spec, version is optional
if image is specified.
type: string
type: object
nullable: true
type: array
x-kubernetes-list-type: atomic
capabilities:
description: capabilities describes the state of optional, core cluster
components.
properties:
enabledCapabilities:
description: enabledCapabilities lists all the capabilities that
are currently managed.
items:
description: ClusterVersionCapability enumerates optional, core
cluster components.
enum:
- openshift-samples
- baremetal
- marketplace
- Console
- Insights
- Storage
- CSISnapshot
- NodeTuning
- MachineAPI
- Build
- DeploymentConfig
- ImageRegistry
- OperatorLifecycleManager
- CloudCredential
- Ingress
- CloudControllerManager
type: string
type: array
x-kubernetes-list-type: atomic
knownCapabilities:
description: knownCapabilities lists all the capabilities known
to the current cluster.
items:
description: ClusterVersionCapability enumerates optional, core
cluster components.
enum:
- openshift-samples
- baremetal
- marketplace
- Console
- Insights
- Storage
- CSISnapshot
- NodeTuning
- MachineAPI
- Build
- DeploymentConfig
- ImageRegistry
- OperatorLifecycleManager
- CloudCredential
- Ingress
- CloudControllerManager
type: string
type: array
x-kubernetes-list-type: atomic
type: object
conditionalUpdates:
description: conditionalUpdates contains the list of updates that
may be recommended for this cluster if it meets specific required
conditions. Consumers interested in the set of updates that are
actually recommended for this cluster should use availableUpdates.
This list may be empty if no updates are recommended, if the update
service is unavailable, or if an empty or invalid channel has been
specified.
items:
description: ConditionalUpdate represents an update which is recommended
to some clusters on the version the current cluster is reconciling,
but which may not be recommended for the current cluster.
properties:
conditions:
description: 'conditions represents the observations of the
conditional update''s current status. Known types are: * Recommended,
for whether the update is recommended for the current cluster.'
items:
description: "Condition contains details for one aspect of
the current state of this API Resource. --- This struct
is intended for direct use as an array at the field path
.status.conditions. For example, \n type FooStatus struct{
// Represents the observations of a foo's current state.
// Known .status.conditions.type are: \"Available\", \"Progressing\",
and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields
}"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should
be when the underlying condition changed. If that is
not known, then using the time when the API field changed
is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the
current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last transition.
Producers of specific condition types may define expected
values and meanings for this field, and whether the
values are considered a guaranteed API. The value should
be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False,
Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across
resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability
to deconflict is important. The regex it matches is
(dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
release:
description: release is the target of the update.
properties:
channels:
description: channels is the set of Cincinnati channels
to which the release currently belongs.
items:
type: string
type: array
x-kubernetes-list-type: set
image:
description: image is a container image location that contains
the update. When this field is part of spec, image is
optional if version is specified and the availableUpdates
field contains a matching version.
type: string
url:
description: url contains information about this release.
This URL is set by the 'url' metadata property on a release
or the metadata returned by the update API and should
be displayed as a link in user interfaces. The URL field
may not be set for test or nightly releases.
type: string
version:
description: version is a semantic version identifying the
update version. When this field is part of spec, version
is optional if image is specified.
type: string
type: object
risks:
description: risks represents the range of issues associated
with updating to the target release. The cluster-version operator
will evaluate all entries, and only recommend the update if
there is at least one entry and all entries recommend the
update.
items:
description: ConditionalUpdateRisk represents a reason and
cluster-state for not recommending a conditional update.
properties:
matchingRules:
description: matchingRules is a slice of conditions for
deciding which clusters match the risk and which do
not. The slice is ordered by decreasing precedence.
The cluster-version operator will walk the slice in
order, and stop after the first it can successfully
evaluate. If no condition can be successfully evaluated,
the update will not be recommended.
items:
description: ClusterCondition is a union of typed cluster
conditions. The 'type' property determines which
of the type-specific properties are relevant. When
evaluated on a cluster, the condition may match, not
match, or fail to evaluate.
properties:
promql:
description: promQL represents a cluster condition
based on PromQL.
properties:
promql:
description: PromQL is a PromQL query classifying
clusters. This query query should return a
1 in the match case and a 0 in the does-not-match
case. Queries which return no time series,
or which return values besides 0 or 1, are
evaluation failures.
type: string
required:
- promql
type: object
type:
description: type represents the cluster-condition
type. This defines the members and semantics of
any additional properties.
enum:
- Always
- PromQL
type: string
required:
- type
type: object
minItems: 1
type: array
x-kubernetes-list-type: atomic
message:
description: message provides additional information about
the risk of updating, in the event that matchingRules
match the cluster state. This is only to be consumed
by humans. It may contain Line Feed characters (U+000A),
which should be rendered as new lines.
minLength: 1
type: string
name:
description: name is the CamelCase reason for not recommending
a conditional update, in the event that matchingRules
match the cluster state.
minLength: 1
type: string
url:
description: url contains information about this risk.
format: uri
minLength: 1
type: string
required:
- matchingRules
- message
- name
- url
type: object
minItems: 1
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
required:
- release
- risks
type: object
type: array
x-kubernetes-list-type: atomic
conditions:
description: conditions provides information about the cluster version.
The condition "Available" is set to true if the desiredUpdate has
been reached. The condition "Progressing" is set to true if an update
is being applied. The condition "Degraded" is set to true if an
update is currently blocked by a temporary or permanent error. Conditions
are only valid for the current desiredUpdate when metadata.generation
is equal to status.generation.
items:
description: ClusterOperatorStatusCondition represents the state
of the operator's managed and monitored components.
properties:
lastTransitionTime:
description: lastTransitionTime is the time of the last update
to the current status property.
format: date-time
type: string
message:
description: message provides additional information about the
current condition. This is only to be consumed by humans. It
may contain Line Feed characters (U+000A), which should be
rendered as new lines.
type: string
reason:
description: reason is the CamelCase reason for the condition's
current status.
type: string
status:
description: status of the condition, one of True, False, Unknown.
type: string
type:
description: type specifies the aspect reported by this condition.
type: string
required:
- lastTransitionTime
- status
- type
type: object
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
desired:
description: desired is the version that the cluster is reconciling
towards. If the cluster is not yet fully initialized desired will
be set with the information available, which may be an image or
a tag.
properties:
channels:
description: channels is the set of Cincinnati channels to which
the release currently belongs.
items:
type: string
type: array
x-kubernetes-list-type: set
image:
description: image is a container image location that contains
the update. When this field is part of spec, image is optional
if version is specified and the availableUpdates field contains
a matching version.
type: string
url:
description: url contains information about this release. This
URL is set by the 'url' metadata property on a release or the
metadata returned by the update API and should be displayed
as a link in user interfaces. The URL field may not be set for
test or nightly releases.
type: string
version:
description: version is a semantic version identifying the update
version. When this field is part of spec, version is optional
if image is specified.
type: string
type: object
history:
description: history contains a list of the most recent versions applied
to the cluster. This value may be empty during cluster startup,
and then will be updated when a new update is being applied. The
newest update is first in the list and it is ordered by recency.
Updates in the history have state Completed if the rollout completed
- if an update was failing or halfway applied the state will be
Partial. Only a limited amount of update history is preserved.
items:
description: UpdateHistory is a single attempted update to the cluster.
properties:
acceptedRisks:
description: acceptedRisks records risks which were accepted
to initiate the update. For example, it may mention an Upgradeable=False
or missing signature that was overridden via desiredUpdate.force,
or an update that was initiated despite not being in the availableUpdates
set of recommended update targets.
type: string
completionTime:
description: completionTime, if set, is when the update was
fully applied. The update that is currently being applied
will have a null completion time. Completion time will always
be set for entries that are not the current update (usually
to the started time of the next update).
format: date-time
nullable: true
type: string
image:
description: image is a container image location that contains
the update. This value is always populated.
type: string
startedTime:
description: startedTime is the time at which the update was
started.
format: date-time
type: string
state:
description: state reflects whether the update was fully applied.
The Partial state indicates the update is not fully applied,
while the Completed state indicates the update was successfully
rolled out at least once (all parts of the update successfully
applied).
type: string
verified:
description: verified indicates whether the provided update
was properly verified before it was installed. If this is
false the cluster may not be trusted. Verified does not cover
upgradeable checks that depend on the cluster state at the
time when the update target was accepted.
type: boolean
version:
description: version is a semantic version identifying the update
version. If the requested image does not define a version,
or if a failure occurs retrieving the image, this value may
be empty.
type: string
required:
- completionTime
- image
- startedTime
- state
- verified
type: object
type: array
x-kubernetes-list-type: atomic
observedGeneration:
description: observedGeneration reports which version of the spec
is being synced. If this value is not equal to metadata.generation,
then the desired and conditions fields may represent a previous
version.
format: int64
type: integer
versionHash:
description: versionHash is a fingerprint of the content that the
cluster will be updated with. It is used by the operator to avoid
unnecessary work and is for internal use only.
type: string
required:
- availableUpdates
- desired
- observedGeneration
- versionHash
type: object
required:
- spec
type: object
x-kubernetes-validations:
- message: the `marketplace` capability requires the `OperatorLifecycleManager`
capability, which is neither explicitly or implicitly enabled in this
cluster, please enable the `OperatorLifecycleManager` capability
rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities)
&& self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace''
in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager''
in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status)
&& has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities)
&& ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities)
: true'
served: true
storage: true
subresources:
status: {}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,107 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.openshift.io: https://github.com/openshift/api/pull/470
api.openshift.io/merged-by-featuregates: "true"
include.release.openshift.io/ibm-cloud-managed: "true"
include.release.openshift.io/self-managed-high-availability: "true"
release.openshift.io/bootstrap-required: "true"
name: proxies.config.openshift.io
spec:
group: config.openshift.io
names:
kind: Proxy
listKind: ProxyList
plural: proxies
singular: proxy
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: "Proxy holds cluster-wide information on how to configure default
proxies for the cluster. The canonical name is `cluster` \n Compatibility
level 1: Stable within a major release for a minimum of 12 months or 3 minor
releases (whichever is longer)."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec holds user-settable values for the proxy configuration
properties:
httpProxy:
description: httpProxy is the URL of the proxy for HTTP requests. Empty
means unset and will not result in an env var.
type: string
httpsProxy:
description: httpsProxy is the URL of the proxy for HTTPS requests. Empty
means unset and will not result in an env var.
type: string
noProxy:
description: noProxy is a comma-separated list of hostnames and/or
CIDRs and/or IPs for which the proxy should not be used. Empty means
unset and will not result in an env var.
type: string
readinessEndpoints:
description: readinessEndpoints is a list of endpoints used to verify
readiness of the proxy.
items:
type: string
type: array
trustedCA:
description: "trustedCA is a reference to a ConfigMap containing a
CA certificate bundle. The trustedCA field should only be consumed
by a proxy validator. The validator is responsible for reading the
certificate bundle from the required key \"ca-bundle.crt\", merging
it with the system default trust bundle, and writing the merged
trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\"
namespace. Clients that expect to make proxy connections must use
the trusted-ca-bundle for all HTTPS requests to the proxy, and may
use the trusted-ca-bundle for non-proxy HTTPS requests as well.
\n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\".
Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind:
ConfigMap metadata: name: user-ca-bundle namespace: openshift-config
data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate
bundle. -----END CERTIFICATE-----"
properties:
name:
description: name is the metadata.name of the referenced config
map
type: string
required:
- name
type: object
type: object
status:
description: status holds observed values from the cluster. They may not
be overridden.
properties:
httpProxy:
description: httpProxy is the URL of the proxy for HTTP requests.
type: string
httpsProxy:
description: httpsProxy is the URL of the proxy for HTTPS requests.
type: string
noProxy:
description: noProxy is a comma-separated list of hostnames and/or
CIDRs for which the proxy should not be used.
type: string
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: openshift-machine-config-operator

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: openshift-config

View File

@@ -0,0 +1,20 @@
apiVersion: config.openshift.io/v1
kind: ClusterVersion
metadata:
name: version
spec:
clusterID: c37c9544-4320-4380-9d8b-0753a4d9ea57
status:
availableUpdates: null
observedGeneration: 2
versionHash: NC4xNi4wLWxvY2FsaG9zdAo=
desired:
image: $TEST_IMAGE
version: 4.25.0
history:
- completionTime: "2024-03-18T09:49:38Z"
image: $TEST_IMAGE
startedTime: "2024-03-18T09:11:55Z"
state: Completed
verified: false
version: 4.25.0

View File

@@ -0,0 +1,7 @@
apiVersion: config.openshift.io/v1
kind: Infrastructure
metadata:
name: cluster
spec:
platformSpec:
type: BareMetal

View File

@@ -0,0 +1,8 @@
apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
name: cluster
spec:
trustedCA:
name: ""
status: {}

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cluster-config-v1
namespace: kube-system
data:
install-config: |
apiVersion: v1
metadata:
name: ostest
baseDomain: test.nodejoiner.org
sshKey: my-sshKey

View File

@@ -0,0 +1,28 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: coreos-bootimages
namespace: openshift-machine-config-operator
data:
stream: |
{
"stream": "stable",
"architectures": {
"x86_64": {
"artifacts": {
"metal": {
"release": "39.20231101.3.0",
"formats": {
"iso": {
"disk": {
"location": "<not-used>",
"sha256": "0c19997ca0170a2d8634b5942c9437a18b6d354b020c7e24aa9fe41f1458f33e"
}
}
}
}
}
}
}
}

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Node
metadata:
name: master-0
labels:
node-role.kubernetes.io/master: ""
status:
nodeInfo:
architecture: amd64

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: pull-secret
namespace: openshift-config
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: |-
{
"auths": {
"quay.io": {
"auth": "c3VwZXItc2VjcmV0Cg=="
}
}
}

View File

@@ -1,26 +1,18 @@
package main
import (
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"testing"
"github.com/cavaliercoder/go-cpio"
igntypes "github.com/coreos/ignition/v2/config/v3_2/types"
"github.com/diskfs/go-diskfs"
"github.com/go-openapi/errors"
"github.com/pkg/diff"
"github.com/rogpeppe/go-internal/testscript"
"github.com/stretchr/testify/assert"
"github.com/vincent-petithory/dataurl"
"gopkg.in/yaml.v2"
"github.com/openshift/installer/internal/tshelpers"
"github.com/openshift/installer/pkg/asset/releaseimage"
)
@@ -134,483 +126,19 @@ func runIntegrationTest(t *testing.T, testFolder string) {
},
Cmds: map[string]func(*testscript.TestScript, bool, []string){
"isocmp": isoCmp,
"ignitionImgContains": ignitionImgContains,
"configImgContains": configImgContains,
"initrdImgContains": initrdImgContains,
"unconfiguredIgnContains": unconfiguredIgnContains,
"unconfiguredIgnCmp": unconfiguredIgnCmp,
"expandFile": expandFile,
"isoContains": isoContains,
"existsInIso": existsInIso,
"isocmp": tshelpers.IsoCmp,
"ignitionImgContains": tshelpers.IgnitionImgContains,
"configImgContains": tshelpers.ConfigImgContains,
"initrdImgContains": tshelpers.InitrdImgContains,
"unconfiguredIgnContains": tshelpers.UnconfiguredIgnContains,
"unconfiguredIgnCmp": tshelpers.UnconfiguredIgnCmp,
"expandFile": tshelpers.ExpandFile,
"isoContains": tshelpers.IsoContains,
"existsInIso": tshelpers.ExistsInIso,
},
})
}
// [!] ignitionImgContains `isoPath` `file` check if the specified file `file`
// is stored within /images/ignition.img archive in the ISO `isoPath` image.
func ignitionImgContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: ignitionImgContains isoPath file")
}
workDir := ts.Getenv("WORK")
isoPath, eFilePath := args[0], args[1]
isoPathAbs := filepath.Join(workDir, isoPath)
_, err := extractArchiveFile(isoPathAbs, "/images/ignition.img", eFilePath)
ts.Check(err)
}
// [!] configImgContains `isoPath` `file` check if the specified file `file`
// is stored within the config image ISO.
func configImgContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: configImgContains isoPath file")
}
workDir := ts.Getenv("WORK")
isoPath, eFilePath := args[0], args[1]
isoPathAbs := filepath.Join(workDir, isoPath)
_, err := extractArchiveFile(isoPathAbs, eFilePath, "")
ts.Check(err)
}
// archiveFileNames `isoPath` get the names of the archive files to use
// based on the name of the ISO image.
func archiveFileNames(isoPath string) (string, string, error) {
if strings.HasPrefix(isoPath, "agent.") {
return "/images/ignition.img", "config.ign", nil
} else if strings.HasPrefix(isoPath, "agentconfig.") {
return "/config.gz", "", nil
}
return "", "", errors.NotFound(fmt.Sprintf("ISO %s has unrecognized prefix", isoPath))
}
// [!] unconfiguredIgnContains `file` check if the specified file `file`
// is stored within the unconfigured ignition Storage Files.
func unconfiguredIgnContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 1 {
ts.Fatalf("usage: unconfiguredIgnContains file")
}
ignitionStorageContains(ts, neg, []string{"unconfigured-agent.ign", args[0]})
}
// [!] ignitionStorageContains `ignPath` `file` check if the specified file `file`
// is stored within the ignition Storage Files.
func ignitionStorageContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: ignitionStorageContains ignPath file")
}
workDir := ts.Getenv("WORK")
ignPath, eFilePath := args[0], args[1]
ignPathAbs := filepath.Join(workDir, ignPath)
config, err := readIgnition(ts, ignPathAbs)
ts.Check(err)
found := false
for _, f := range config.Storage.Files {
if f.Path == eFilePath {
found = true
}
}
if !found && !neg {
ts.Fatalf("%s does not contain %s", ignPath, eFilePath)
}
if neg && found {
ts.Fatalf("%s should not contain %s", ignPath, eFilePath)
}
}
// [!] isoCmp `isoPath` `isoFile` `expectedFile` check that the content of the file
// `isoFile` - extracted from the ISO embedded configuration file referenced
// by `isoPath` - matches the content of the local file `expectedFile`.
// Environment variables in `expectedFile` are substituted before the comparison.
func isoCmp(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 3 {
ts.Fatalf("usage: isocmp isoPath file1 file2")
}
workDir := ts.Getenv("WORK")
isoPath, aFilePath, eFilePath := args[0], args[1], args[2]
isoPathAbs := filepath.Join(workDir, isoPath)
archiveFile, ignitionFile, err := archiveFileNames(isoPath)
if err != nil {
ts.Check(err)
}
aData, err := readFileFromISO(isoPathAbs, archiveFile, ignitionFile, aFilePath)
ts.Check(err)
eFilePathAbs := filepath.Join(workDir, eFilePath)
eData, err := os.ReadFile(eFilePathAbs)
ts.Check(err)
byteCompare(ts, neg, aData, eData, aFilePath, eFilePath)
}
// [!] unconfiguredIgnCmp `fileInIgn` `expectedFile` check that the content
// of the file `fileInIgn` extracted from the unconfigured ignition
// configuration file matches the content of the local file `expectedFile`.
// Environment variables in in `expectedFile` are substituted before the comparison.
func unconfiguredIgnCmp(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: iunconfiguredIgnCmp file1 file2")
}
argsNext := []string{"unconfigured-agent.ign", args[0], args[1]}
ignitionStorageCmp(ts, neg, argsNext)
}
// [!] ignitionStorageCmp `ignPath` `ignFile` `expectedFile` check that the content of the file
// `ignFile` - extracted from the ignition configuration file referenced
// by `ignPath` - matches the content of the local file `expectedFile`.
// Environment variables in in `expectedFile` are substituted before the comparison.
func ignitionStorageCmp(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 3 {
ts.Fatalf("usage: ignitionStorageCmp ignPath file1 file2")
}
workDir := ts.Getenv("WORK")
ignPath, aFilePath, eFilePath := args[0], args[1], args[2]
ignPathAbs := filepath.Join(workDir, ignPath)
config, err := readIgnition(ts, ignPathAbs)
ts.Check(err)
aData, err := readFileFromIgnitionCfg(&config, aFilePath)
ts.Check(err)
eFilePathAbs := filepath.Join(workDir, eFilePath)
eData, err := os.ReadFile(eFilePathAbs)
ts.Check(err)
byteCompare(ts, neg, aData, eData, aFilePath, eFilePath)
}
func readIgnition(ts *testscript.TestScript, ignPath string) (config igntypes.Config, err error) {
rawIgn, err := os.ReadFile(ignPath)
ts.Check(err)
err = json.Unmarshal(rawIgn, &config)
return config, err
}
// [!] expandFile `file...` can be used to substitute environment variables
// references for each file specified.
func expandFile(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 1 {
ts.Fatalf("usage: expandFile file...")
}
workDir := ts.Getenv("WORK")
for _, f := range args {
fileName := filepath.Join(workDir, f)
data, err := os.ReadFile(fileName)
ts.Check(err)
newData := expand(ts, data)
err = os.WriteFile(fileName, []byte(newData), 0)
ts.Check(err)
}
}
func expand(ts *testscript.TestScript, s []byte) string {
return os.Expand(string(s), func(key string) string {
return ts.Getenv(key)
})
}
func byteCompare(ts *testscript.TestScript, neg bool, aData, eData []byte, aFilePath, eFilePath string) {
aText := string(aData)
eText := expand(ts, eData)
eq := aText == eText
if neg {
if eq {
ts.Fatalf("%s and %s do not differ", aFilePath, eFilePath)
}
return
}
if eq {
return
}
ts.Logf(aText)
var sb strings.Builder
if err := diff.Text(eFilePath, aFilePath, eText, aText, &sb); err != nil {
ts.Check(err)
}
ts.Logf("%s", sb.String())
ts.Fatalf("%s and %s differ", eFilePath, aFilePath)
}
func readFileFromISO(isoPath, archiveFile, ignitionFile, nodePath string) ([]byte, error) {
config, err := extractCfgData(isoPath, archiveFile, ignitionFile, nodePath)
if err != nil {
return nil, err
}
return config, nil
}
func readFileFromIgnitionCfg(config *igntypes.Config, nodePath string) ([]byte, error) {
for _, f := range config.Storage.Files {
if f.Node.Path == nodePath {
actualData, err := dataurl.DecodeString(*f.FileEmbedded1.Contents.Source)
if err != nil {
return nil, err
}
return actualData.Data, nil
}
}
return nil, errors.NotFound(nodePath)
}
func extractArchiveFile(isoPath, archive, fileName string) ([]byte, error) {
disk, err := diskfs.Open(isoPath, diskfs.WithOpenMode(diskfs.ReadOnly))
if err != nil {
return nil, err
}
fs, err := disk.GetFilesystem(0)
if err != nil {
return nil, err
}
ignitionImg, err := fs.OpenFile(archive, os.O_RDONLY)
if err != nil {
return nil, err
}
gzipReader, err := gzip.NewReader(ignitionImg)
if err != nil {
return nil, err
}
cpioReader := cpio.NewReader(gzipReader)
for {
header, err := cpioReader.Next()
if err == io.EOF { //nolint:errorlint
// end of cpio archive
break
}
if err != nil {
return nil, err
}
// If the file is not in ignition return it directly
if fileName == "" || header.Name == fileName {
rawContent, err := io.ReadAll(cpioReader)
if err != nil {
return nil, err
}
return rawContent, nil
}
}
return nil, errors.NotFound(fmt.Sprintf("File %s not found within the %s archive", fileName, archive))
}
func extractCfgData(isoPath, archiveFile, ignitionFile, nodePath string) ([]byte, error) {
if ignitionFile == "" {
// If the archive is not part of an ignition file return the archive data
rawContent, err := extractArchiveFile(isoPath, archiveFile, nodePath)
if err != nil {
return nil, err
}
return rawContent, nil
}
rawContent, err := extractArchiveFile(isoPath, archiveFile, ignitionFile)
if err != nil {
return nil, err
}
var config igntypes.Config
err = json.Unmarshal(rawContent, &config)
if err != nil {
return nil, err
}
for _, f := range config.Storage.Files {
if f.Node.Path == nodePath {
actualData, err := dataurl.DecodeString(*f.FileEmbedded1.Contents.Source)
if err != nil {
return nil, err
}
return actualData.Data, nil
}
}
return nil, errors.NotFound(fmt.Sprintf("File %s not found within the %s archive", nodePath, archiveFile))
}
// [!] initrdImgContains `isoPath` `file` check if the specified file `file`
// is stored within a compressed cpio archive by scanning the content of
// /images/ignition.img archive in the ISO `isoPath` image (note: plain cpio
// archives are ignored).
func initrdImgContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: initrdImgContains isoPath file")
}
workDir := ts.Getenv("WORK")
isoPath, eFilePath := args[0], args[1]
isoPathAbs := filepath.Join(workDir, isoPath)
err := checkFileFromInitrdImg(isoPathAbs, eFilePath)
ts.Check(err)
}
// [!] isoContains `isoPath` `file` check if the specified `file` is stored
// within the ISO `isoPath` image.
func isoContains(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: isoContains isoPath file")
}
workDir := ts.Getenv("WORK")
isoPath, filePath := args[0], args[1]
isoPathAbs := filepath.Join(workDir, isoPath)
disk, err := diskfs.Open(isoPathAbs, diskfs.WithOpenMode(diskfs.ReadOnly))
ts.Check(err)
fs, err := disk.GetFilesystem(0)
ts.Check(err)
_, err = fs.OpenFile(filePath, os.O_RDONLY)
ts.Check(err)
}
// [!] existsInIso `isoPath` `file` check if the specified `file` is stored
// within the ISO `isoPath` image.
func existsInIso(ts *testscript.TestScript, neg bool, args []string) {
if len(args) != 2 {
ts.Fatalf("usage: isoContains isoPath file")
}
workDir := ts.Getenv("WORK")
isoPath, filePath := args[0], args[1]
isoPathAbs := filepath.Join(workDir, isoPath)
archiveFile, ignitionFile, err := archiveFileNames(isoPath)
if err != nil {
ts.Check(err)
}
_, err = readFileFromISO(isoPathAbs, archiveFile, ignitionFile, filePath)
ts.Check(err)
}
func checkFileFromInitrdImg(isoPath string, fileName string) error {
disk, err := diskfs.Open(isoPath, diskfs.WithOpenMode(diskfs.ReadOnly))
if err != nil {
return err
}
fs, err := disk.GetFilesystem(0)
if err != nil {
return err
}
initRdImg, err := fs.OpenFile("/images/pxeboot/initrd.img", os.O_RDONLY)
if err != nil {
return err
}
defer initRdImg.Close()
const (
gzipID1 = 0x1f
gzipID2 = 0x8b
gzipDeflate = 0x08
)
buff := make([]byte, 4096)
for {
_, err := initRdImg.Read(buff)
if err == io.EOF { //nolint:errorlint
break
}
foundAt := -1
for idx := 0; idx < len(buff)-2; idx++ {
// scan the buffer for a potential gzip header
if buff[idx+0] == gzipID1 && buff[idx+1] == gzipID2 && buff[idx+2] == gzipDeflate {
foundAt = idx
break
}
}
if foundAt >= 0 {
// check if it's really a compressed cpio archive
delta := int64(foundAt - len(buff))
newPos, err := initRdImg.Seek(delta, io.SeekCurrent)
if err != nil {
break
}
files, err := lookForCpioFiles(initRdImg)
if err != nil {
if _, err := initRdImg.Seek(newPos+2, io.SeekStart); err != nil {
break
}
continue
}
// check if the current cpio files match the required ones
for _, f := range files {
matched, err := filepath.Match(fileName, f)
if err != nil {
return err
}
if matched {
return nil
}
}
}
}
return errors.NotFound(fmt.Sprintf("File %s not found within the /images/pxeboot/initrd.img archive", fileName))
}
func lookForCpioFiles(r io.Reader) ([]string, error) {
var files []string
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
defer gr.Close()
// skip in case of garbage
if gr.OS != 255 && gr.OS >= 13 {
return nil, fmt.Errorf("Unknown OS code: %v", gr.Header.OS)
}
cr := cpio.NewReader(gr)
for {
h, err := cr.Next()
if err != nil {
break
}
files = append(files, h.Name)
}
return files, nil
}
func updatePullSecret(workDir, authFilePath string) error {
authFile, err := os.ReadFile(authFilePath)
if err != nil {

View File

@@ -0,0 +1,7 @@
#!/bin/sh
# Example: ./hack/go-integration-test-nodejoiner.sh
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
# GOPATH is frequently not exported with modern Go installations; fall back
# to `go env GOPATH` so setup-envtest can still be located.
GOPATH="${GOPATH:-$(go env GOPATH)}"
export KUBEBUILDER_ASSETS="$("$GOPATH"/bin/setup-envtest use 1.31.0 -p path)"
go test -parallel 1 -p 1 -timeout 0 -run .Integration ./cmd/node-joiner/... "${@}"

View File

@@ -1,4 +1,4 @@
#!/bin/sh
# Example: ./hack/go-integration-test.sh
go test -parallel 1 -p 1 -timeout 0 -run .Integration ./cmd/... ./data/... ./pkg/... "${@}"
go test -parallel 1 -p 1 -timeout 0 -run .Integration ./cmd/openshift-install/... ./data/... ./pkg/... "${@}"

View File

@@ -0,0 +1,504 @@
package tshelpers
import (
"compress/gzip"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/cavaliercoder/go-cpio"
igntypes "github.com/coreos/ignition/v2/config/v3_2/types"
"github.com/diskfs/go-diskfs"
"github.com/go-openapi/errors"
"github.com/pkg/diff"
"github.com/rogpeppe/go-internal/testscript"
"github.com/vincent-petithory/dataurl"
)
// IgnitionImgContains `isoPath` `file` is a testscript command that verifies
// the file `file` is present inside the /images/ignition.img archive of the
// ISO referenced (relative to $WORK) by `isoPath`.
func IgnitionImgContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		ts.Fatalf("usage: ignitionImgContains isoPath file")
	}
	iso, wanted := args[0], args[1]
	absIso := filepath.Join(ts.Getenv("WORK"), iso)
	_, err := extractArchiveFile(absIso, "/images/ignition.img", wanted)
	ts.Check(err)
}
// ConfigImgContains `isoPath` `file` is a testscript command that verifies
// the file `file` is present inside the config image ISO referenced
// (relative to $WORK) by `isoPath`.
func ConfigImgContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		ts.Fatalf("usage: configImgContains isoPath file")
	}
	iso, wanted := args[0], args[1]
	absIso := filepath.Join(ts.Getenv("WORK"), iso)
	_, err := extractArchiveFile(absIso, wanted, "")
	ts.Check(err)
}
// archiveFileNames maps an ISO file name to the embedded archive that must be
// inspected and, when applicable, to the ignition file stored within it.
func archiveFileNames(isoPath string) (string, string, error) {
	switch {
	case strings.HasPrefix(isoPath, "agent."), strings.HasPrefix(isoPath, "node."):
		return "/images/ignition.img", "config.ign", nil
	case strings.HasPrefix(isoPath, "agentconfig."):
		return "/config.gz", "", nil
	}
	return "", "", errors.NotFound(fmt.Sprintf("ISO %s has unrecognized prefix", isoPath))
}
// UnconfiguredIgnContains `file` checks that the specified file `file` is
// stored within the storage files of the unconfigured ignition.
func UnconfiguredIgnContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 1 {
		ts.Fatalf("usage: unconfiguredIgnContains file")
	}
	// Delegate to the generic ignition-storage check against the well-known
	// unconfigured ignition file name.
	IgnitionStorageContains(ts, neg, append([]string{"unconfigured-agent.ign"}, args[0]))
}
// IgnitionStorageContains `ignPath` `file` checks if the specified file `file`
// is stored within the ignition Storage Files of the ignition config at
// `ignPath` (relative to $WORK). With the `!` prefix the check is negated.
func IgnitionStorageContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		ts.Fatalf("usage: ignitionStorageContains ignPath file")
	}
	workDir := ts.Getenv("WORK")
	ignPath, eFilePath := args[0], args[1]
	ignPathAbs := filepath.Join(workDir, ignPath)
	config, err := readIgnition(ts, ignPathAbs)
	ts.Check(err)
	found := false
	for _, f := range config.Storage.Files {
		if f.Path == eFilePath {
			found = true
			// Stop at the first match; scanning the rest is pointless.
			break
		}
	}
	if !found && !neg {
		ts.Fatalf("%s does not contain %s", ignPath, eFilePath)
	}
	if neg && found {
		ts.Fatalf("%s should not contain %s", ignPath, eFilePath)
	}
}
// IsoIgnitionContains `isoPath` `file` checks that the file `file` exists
// within the ignition configuration embedded in the ISO referenced
// (relative to $WORK) by `isoPath`.
func IsoIgnitionContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		// Fixed: the usage message previously omitted the `file` argument.
		ts.Fatalf("usage: isoIgnitionContains isoPath file")
	}
	workDir := ts.Getenv("WORK")
	isoPath, eFilePath := args[0], args[1]
	isoPathAbs := filepath.Join(workDir, isoPath)
	archiveFile, ignitionFile, err := archiveFileNames(isoPath)
	// ts.Check is a no-op on nil, so the previous `if err != nil` guard
	// around it was redundant.
	ts.Check(err)
	_, err = readFileFromISO(isoPathAbs, archiveFile, ignitionFile, eFilePath)
	ts.Check(err)
}
// IsoCmp `isoPath` `isoFile` `expectedFile` checks that the content of the
// file `isoFile` - extracted from the ISO embedded configuration file
// referenced by `isoPath` - matches the content of the local file
// `expectedFile`. Environment variables in `expectedFile` are substituted
// before the comparison.
func IsoCmp(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 3 {
		ts.Fatalf("usage: isocmp isoPath file1 file2")
	}
	workDir := ts.Getenv("WORK")
	isoPath, aFilePath, eFilePath := args[0], args[1], args[2]
	isoPathAbs := filepath.Join(workDir, isoPath)
	archiveFile, ignitionFile, err := archiveFileNames(isoPath)
	// ts.Check aborts the script on a non-nil error and does nothing
	// otherwise, so wrapping it in `if err != nil` was redundant.
	ts.Check(err)
	aData, err := readFileFromISO(isoPathAbs, archiveFile, ignitionFile, aFilePath)
	ts.Check(err)
	eData, err := os.ReadFile(filepath.Join(workDir, eFilePath))
	ts.Check(err)
	byteCompare(ts, neg, aData, eData, aFilePath, eFilePath)
}
// UnconfiguredIgnCmp `fileInIgn` `expectedFile` checks that the content
// of the file `fileInIgn` extracted from the unconfigured ignition
// configuration file matches the content of the local file `expectedFile`.
// Environment variables in `expectedFile` are substituted before the comparison.
func UnconfiguredIgnCmp(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		// Fixed: the usage message previously read "iunconfiguredIgnCmp".
		ts.Fatalf("usage: unconfiguredIgnCmp file1 file2")
	}
	argsNext := []string{"unconfigured-agent.ign", args[0], args[1]}
	ignitionStorageCmp(ts, neg, argsNext)
}
// ignitionStorageCmp `ignPath` `ignFile` `expectedFile` checks that the
// content of the file `ignFile` - extracted from the ignition configuration
// file referenced by `ignPath` - matches the content of the local file
// `expectedFile`. Environment variables in `expectedFile` are substituted
// before the comparison.
func ignitionStorageCmp(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 3 {
		ts.Fatalf("usage: ignitionStorageCmp ignPath file1 file2")
	}
	work := ts.Getenv("WORK")
	ignName, gotPath, wantPath := args[0], args[1], args[2]
	cfg, err := readIgnition(ts, filepath.Join(work, ignName))
	ts.Check(err)
	got, err := readFileFromIgnitionCfg(&cfg, gotPath)
	ts.Check(err)
	want, err := os.ReadFile(filepath.Join(work, wantPath))
	ts.Check(err)
	byteCompare(ts, neg, got, want, gotPath, wantPath)
}
// readIgnition loads and unmarshals the ignition configuration stored at
// ignPath; a read failure aborts the script immediately via ts.Check.
func readIgnition(ts *testscript.TestScript, ignPath string) (igntypes.Config, error) {
	var config igntypes.Config
	rawIgn, err := os.ReadFile(ignPath)
	ts.Check(err)
	return config, json.Unmarshal(rawIgn, &config)
}
// ExpandFile `file...` substitutes environment variable references in each of
// the specified files, rewriting them in place.
func ExpandFile(ts *testscript.TestScript, neg bool, args []string) {
	// The command is documented as variadic (`file...`) and the loop below
	// already handles any number of files, but the previous guard rejected
	// more than one argument. Require at least one instead.
	if len(args) < 1 {
		ts.Fatalf("usage: expandFile file...")
	}
	workDir := ts.Getenv("WORK")
	for _, f := range args {
		fileName := filepath.Join(workDir, f)
		data, err := os.ReadFile(fileName)
		ts.Check(err)
		newData := expand(ts, data)
		// Perm 0 only matters when creating a new file; this file was just
		// read, so it exists and keeps its current mode.
		err = os.WriteFile(fileName, []byte(newData), 0)
		ts.Check(err)
	}
}
// expand returns s with every $VAR reference replaced by the value of the
// corresponding testscript environment variable.
func expand(ts *testscript.TestScript, s []byte) string {
	lookup := func(key string) string { return ts.Getenv(key) }
	return os.Expand(string(s), lookup)
}
// byteCompare compares the actual data aData against the expected data eData
// (after environment variable expansion) and fails the script when the
// outcome does not match the neg flag. On a mismatch, the actual content and
// a diff are logged before failing.
func byteCompare(ts *testscript.TestScript, neg bool, aData, eData []byte, aFilePath, eFilePath string) {
	aText := string(aData)
	eText := expand(ts, eData)
	eq := aText == eText
	if neg {
		if eq {
			ts.Fatalf("%s and %s do not differ", aFilePath, eFilePath)
		}
		return
	}
	if eq {
		return
	}
	// Use an explicit format verb: passing aText as the format string would
	// misinterpret any '%' in the compared content as a printf directive.
	ts.Logf("%s", aText)
	var sb strings.Builder
	if err := diff.Text(eFilePath, aFilePath, eText, aText, &sb); err != nil {
		ts.Check(err)
	}
	ts.Logf("%s", sb.String())
	ts.Fatalf("%s and %s differ", eFilePath, aFilePath)
}
// readFileFromISO returns the content of nodePath, located within the
// configuration data embedded in the given ISO image.
func readFileFromISO(isoPath, archiveFile, ignitionFile, nodePath string) ([]byte, error) {
	// Thin forwarding wrapper kept for readability at the call sites.
	return extractCfgData(isoPath, archiveFile, ignitionFile, nodePath)
}
// readFileFromIgnitionCfg returns the decoded content of the storage file
// whose path is nodePath in the given ignition config, or a not-found error
// when no storage file matches.
func readFileFromIgnitionCfg(config *igntypes.Config, nodePath string) ([]byte, error) {
	for i := range config.Storage.Files {
		f := &config.Storage.Files[i]
		if f.Node.Path != nodePath {
			continue
		}
		// Storage file contents are embedded as a data URL.
		decoded, err := dataurl.DecodeString(*f.FileEmbedded1.Contents.Source)
		if err != nil {
			return nil, err
		}
		return decoded.Data, nil
	}
	return nil, errors.NotFound(nodePath)
}
// extractArchiveFile opens the gzip-compressed cpio archive `archive` inside
// the ISO at isoPath and returns the content of fileName. When fileName is
// empty, the first entry of the archive is returned. A not-found error is
// returned when the archive does not contain the requested file.
func extractArchiveFile(isoPath, archive, fileName string) ([]byte, error) {
	disk, err := diskfs.Open(isoPath, diskfs.WithOpenMode(diskfs.ReadOnly))
	if err != nil {
		return nil, err
	}
	fs, err := disk.GetFilesystem(0)
	if err != nil {
		return nil, err
	}
	ignitionImg, err := fs.OpenFile(archive, os.O_RDONLY)
	if err != nil {
		return nil, err
	}
	gzipReader, err := gzip.NewReader(ignitionImg)
	if err != nil {
		return nil, err
	}
	// Close the gzip reader on every return path (previously it was leaked).
	defer gzipReader.Close()
	cpioReader := cpio.NewReader(gzipReader)
	for {
		header, err := cpioReader.Next()
		if err == io.EOF { //nolint:errorlint
			// End of the cpio archive.
			break
		}
		if err != nil {
			return nil, err
		}
		// An empty fileName means "return the first entry found".
		if fileName == "" || header.Name == fileName {
			return io.ReadAll(cpioReader)
		}
	}
	return nil, errors.NotFound(fmt.Sprintf("File %s not found within the %s archive", fileName, archive))
}
// extractCfgData retrieves the content of nodePath from the ISO at isoPath.
// When ignitionFile is empty, nodePath is read directly from the archive;
// otherwise ignitionFile is extracted first and nodePath is looked up among
// its ignition storage files.
func extractCfgData(isoPath, archiveFile, ignitionFile, nodePath string) ([]byte, error) {
	if ignitionFile == "" {
		// The archive itself holds the requested file.
		return extractArchiveFile(isoPath, archiveFile, nodePath)
	}
	rawContent, err := extractArchiveFile(isoPath, archiveFile, ignitionFile)
	if err != nil {
		return nil, err
	}
	var config igntypes.Config
	if err := json.Unmarshal(rawContent, &config); err != nil {
		return nil, err
	}
	for _, f := range config.Storage.Files {
		if f.Node.Path != nodePath {
			continue
		}
		actualData, err := dataurl.DecodeString(*f.FileEmbedded1.Contents.Source)
		if err != nil {
			return nil, err
		}
		return actualData.Data, nil
	}
	return nil, errors.NotFound(fmt.Sprintf("File %s not found within the %s archive", nodePath, archiveFile))
}
// InitrdImgContains `isoPath` `file` checks if the specified file `file`
// is stored within a compressed cpio archive by scanning the content of
// the /images/pxeboot/initrd.img archive in the ISO `isoPath` image (note:
// plain cpio archives are ignored).
func InitrdImgContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		ts.Fatalf("usage: initrdImgContains isoPath file")
	}
	iso, wanted := args[0], args[1]
	absIso := filepath.Join(ts.Getenv("WORK"), iso)
	ts.Check(checkFileFromInitrdImg(absIso, wanted))
}
// IsoContains `isoPath` `file` checks if the specified `file` is stored
// within the filesystem of the ISO `isoPath` image.
func IsoContains(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		ts.Fatalf("usage: isoContains isoPath file")
	}
	iso, filePath := args[0], args[1]
	absIso := filepath.Join(ts.Getenv("WORK"), iso)
	disk, err := diskfs.Open(absIso, diskfs.WithOpenMode(diskfs.ReadOnly))
	ts.Check(err)
	fs, err := disk.GetFilesystem(0)
	ts.Check(err)
	// Opening the file is sufficient to prove its presence.
	_, err = fs.OpenFile(filePath, os.O_RDONLY)
	ts.Check(err)
}
// ExistsInIso `isoPath` `file` checks if the specified `file` is stored
// within the embedded configuration of the ISO `isoPath` image.
func ExistsInIso(ts *testscript.TestScript, neg bool, args []string) {
	if len(args) != 2 {
		// Fixed: the message previously named the unrelated isoContains command.
		ts.Fatalf("usage: existsInIso isoPath file")
	}
	workDir := ts.Getenv("WORK")
	isoPath, filePath := args[0], args[1]
	isoPathAbs := filepath.Join(workDir, isoPath)
	archiveFile, ignitionFile, err := archiveFileNames(isoPath)
	// ts.Check already no-ops on nil errors; no guard needed.
	ts.Check(err)
	_, err = readFileFromISO(isoPathAbs, archiveFile, ignitionFile, filePath)
	ts.Check(err)
}
// checkFileFromInitrdImg scans /images/pxeboot/initrd.img inside the given
// ISO for gzip-compressed cpio archives and returns nil when any of them
// contains an entry whose name matches the fileName glob pattern
// (filepath.Match syntax). A not-found error is returned otherwise.
func checkFileFromInitrdImg(isoPath string, fileName string) error {
	disk, err := diskfs.Open(isoPath, diskfs.WithOpenMode(diskfs.ReadOnly))
	if err != nil {
		return err
	}
	fs, err := disk.GetFilesystem(0)
	if err != nil {
		return err
	}
	initRdImg, err := fs.OpenFile("/images/pxeboot/initrd.img", os.O_RDONLY)
	if err != nil {
		return err
	}
	defer initRdImg.Close()
	// Magic bytes identifying a gzip (deflate) stream header.
	const (
		gzipID1 = 0x1f
		gzipID2 = 0x8b
		gzipDeflate = 0x08
	)
	buff := make([]byte, 4096)
	for {
		_, err := initRdImg.Read(buff)
		if err == io.EOF { //nolint:errorlint
			break
		}
		// NOTE(review): the byte count returned by Read is ignored, so after a
		// short read part of buff may still hold stale data from the previous
		// iteration, and a header split across two reads can be missed —
		// confirm this is acceptable for the test fixtures used.
		foundAt := -1
		for idx := 0; idx < len(buff)-2; idx++ {
			// scan the buffer for a potential gzip header
			if buff[idx+0] == gzipID1 && buff[idx+1] == gzipID2 && buff[idx+2] == gzipDeflate {
				foundAt = idx
				break
			}
		}
		if foundAt >= 0 {
			// check if it's really a compressed cpio archive
			// Rewind to the start of the candidate gzip stream: delta is
			// negative (match offset minus buffer length).
			delta := int64(foundAt - len(buff))
			newPos, err := initRdImg.Seek(delta, io.SeekCurrent)
			if err != nil {
				break
			}
			files, err := lookForCpioFiles(initRdImg)
			if err != nil {
				// Not a valid compressed cpio archive: resume scanning just
				// past the false-positive header bytes.
				if _, err := initRdImg.Seek(newPos+2, io.SeekStart); err != nil {
					break
				}
				continue
			}
			// check if the current cpio files match the required ones
			for _, f := range files {
				matched, err := filepath.Match(fileName, f)
				if err != nil {
					return err
				}
				if matched {
					return nil
				}
			}
		}
	}
	return errors.NotFound(fmt.Sprintf("File %s not found within the /images/pxeboot/initrd.img archive", fileName))
}
// lookForCpioFiles tries to interpret the stream r as a gzip-compressed cpio
// archive, returning the names of every entry it manages to decode. Decoding
// stops (without error) at the first entry that cannot be read.
func lookForCpioFiles(r io.Reader) ([]string, error) {
	gzReader, err := gzip.NewReader(r)
	if err != nil {
		return nil, err
	}
	defer gzReader.Close()

	// skip in case of garbage
	if gzReader.OS != 255 && gzReader.OS >= 13 {
		return nil, fmt.Errorf("unknown OS code: %v", gzReader.Header.OS)
	}

	var names []string
	cpioReader := cpio.NewReader(gzReader)
	for {
		hdr, err := cpioReader.Next()
		if err != nil {
			return names, nil
		}
		names = append(names, hdr.Name)
	}
}

View File

@@ -0,0 +1,368 @@
package tshelpers
import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"sort"
	"time"

	"github.com/google/uuid"
	imageapi "github.com/openshift/api/image/v1"
	"github.com/openshift/assisted-image-service/pkg/isoeditor"
	"github.com/openshift/library-go/pkg/image/dockerv1client"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FakeOCPRegistry creates a very minimal Docker registry for publishing
// a single fake OCP release image in fixed repo, plus a bunch of
// additional images required by the Agent-based installer.
// The registry is configured to provide just the minimal amount of data
// required by the tests.
type FakeOCPRegistry struct {
	// mux routes the registry v2 API endpoints.
	mux *http.ServeMux
	// server is the TLS test server backing the registry.
	server *httptest.Server
	// blobs maps a blob digest to its raw content (image configs and layers).
	blobs map[string][]byte
	// manifests maps a manifest digest to its serialized JSON content.
	manifests map[string][]byte
	// tags maps an image tag to the digest of its manifest.
	tags map[string]string
	// releaseDigest is the manifest digest of the published release image.
	releaseDigest string
}
// NewFakeOCPRegistry creates a new, empty instance of the fake registry.
// Call Start to bring up the server and publish the release payload.
func NewFakeOCPRegistry() *FakeOCPRegistry {
	fr := &FakeOCPRegistry{}
	fr.blobs = map[string][]byte{}
	fr.manifests = map[string][]byte{}
	fr.tags = map[string]string{}
	return fr
}
// Start configures the handlers, brings up the local server for the
// registry and pre-loads the data required for publishing an OCP
// release image.
func (fr *FakeOCPRegistry) Start() error {
	fr.mux = http.NewServeMux()

	// Ping handler.
	fr.mux.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Header().Set("Docker-Distribution-Api-Version", "registry/2.0")
		json.NewEncoder(w).Encode(make(map[string]interface{}))
	})

	// serveFrom builds a handler serving entries from the given store,
	// keyed by the {digest} path value; unknown digests get a 404.
	serveFrom := func(contentType string, store map[string][]byte) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Content-Type", contentType)
			data, ok := store[r.PathValue("digest")]
			if !ok {
				w.WriteHeader(http.StatusNotFound)
				return
			}
			w.Write(data)
		}
	}
	// Image manifest retrieval.
	fr.mux.HandleFunc("/v2/ocp/release/manifests/{digest}", serveFrom("application/vnd.docker.distribution.manifest.v2+json", fr.manifests))
	// Generic blobs handler used to serve both the image config and data.
	fr.mux.HandleFunc("/v2/ocp/release/blobs/{digest}", serveFrom("application/octet-stream", fr.blobs))

	// Catch all.
	fr.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotImplemented)
	})

	if err := fr.newTLSServer(fr.mux.ServeHTTP); err != nil {
		return err
	}
	fr.server.StartTLS()

	return fr.setupReleasePayload()
}
// pullSpec builds the by-digest pull spec for an image in the fixed
// ocp/release repo, using the server URL without its "https://" prefix.
func (fr *FakeOCPRegistry) pullSpec(digest string) string {
	host := fr.server.URL[len("https://"):]
	return fmt.Sprintf("%s/ocp/release@%s", host, digest)
}
// ReleasePullspec provides a handy method to get the pull spec of the
// published fake release image.
func (fr *FakeOCPRegistry) ReleasePullspec() string {
	return fr.pullSpec(fr.releaseDigest)
}
func addTarFile(tw *tar.Writer, name string, data []byte) {
header := &tar.Header{
Name: name,
Mode: 0600,
Size: int64(len(data)),
}
tw.WriteHeader(header)
tw.Write(data)
}
// makeMinimalISO creates a small ISO, but good enough to be processed
// by ABI, and returns its raw content.
func makeMinimalISO() ([]byte, error) {
	tempDir, err := os.MkdirTemp("", "nodejoiner-it")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(tempDir)

	// Minimal layout expected by the ISO tooling.
	files := map[string][]byte{
		"iso/images/ignition.img":       []byte("ignitionimg"),
		"iso/images/pxeboot/initrd.img": []byte("initrdimg"),
		"iso/images/efiboot.img":        []byte("efibootimg"),
		"iso/boot.catalog":              []byte("bootcatalog"),
	}
	for file, content := range files {
		fullPath := filepath.Join(tempDir, file)
		if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
			return nil, err
		}
		// os.WriteFile closes each file immediately; the previous
		// Create+Write+defer pattern kept every file open (and its defer
		// pending) until the function returned.
		if err := os.WriteFile(fullPath, content, 0600); err != nil {
			return nil, err
		}
	}

	baseIso := filepath.Join(tempDir, "baseiso-nj.iso")
	if err := isoeditor.Create(baseIso, filepath.Join(tempDir, "iso"), "nj"); err != nil {
		return nil, err
	}
	return os.ReadFile(baseIso)
}
// setupReleasePayload publishes into the registry the images required by the
// tests: the agent-installer-utils image, the machine-os-images image
// (carrying the fake base ISO), and finally the release image referencing
// all the previously pushed tags.
func (fr *FakeOCPRegistry) setupReleasePayload() error {
	// agent-installer-utils image
	if _, err := fr.PushImage("agent-installer-utils", func(tw *tar.Writer) error {
		// fake agent-tui files
		addTarFile(tw, "usr/bin/agent-tui", []byte("foo-data"))
		addTarFile(tw, "usr/lib64/libnmstate.so.2", []byte("foo-data"))
		return nil
	}); err != nil {
		return err
	}

	// machine-os-images image
	if _, err := fr.PushImage("machine-os-images", func(tw *tar.Writer) error {
		// fake base ISO
		isoData, err := makeMinimalISO()
		if err != nil {
			return err
		}
		addTarFile(tw, "coreos/coreos-x86_64.iso", isoData)
		return nil
	}); err != nil {
		return err
	}

	// release image
	releaseDigest, err := fr.PushImage("release-99.0.0", func(tw *tar.Writer) error {
		// images-references file
		imageReferences := imageapi.ImageStream{
			TypeMeta: metav1.TypeMeta{
				Kind:       "ImageStream",
				APIVersion: "image.openshift.io/v1",
			},
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					"io.openshift.build.versions": "99.0.0",
				},
			},
			Spec: imageapi.ImageStreamSpec{},
		}
		// Iterate the tags in sorted order so that the generated
		// image-references content is deterministic (Go map iteration
		// order is randomized).
		tags := make([]string, 0, len(fr.tags))
		for tag := range fr.tags {
			tags = append(tags, tag)
		}
		sort.Strings(tags)
		for _, tag := range tags {
			imageReferences.Spec.Tags = append(imageReferences.Spec.Tags, imageapi.TagReference{
				Name: tag,
				From: &corev1.ObjectReference{
					Name: fr.pullSpec(fr.tags[tag]),
					Kind: "DockerImage",
				},
			})
		}
		// Marshaling these plain structs cannot fail; the error is ignored.
		data, _ := json.Marshal(&imageReferences)
		addTarFile(tw, "release-manifests/image-references", data)

		// release-metadata file
		type CincinnatiMetadata struct {
			Kind     string                 `json:"kind"`
			Version  string                 `json:"version"`
			Previous []string               `json:"previous"`
			Next     []string               `json:"next,omitempty"`
			Metadata map[string]interface{} `json:"metadata,omitempty"`
		}
		releaseMetadata := CincinnatiMetadata{
			Kind:    "cincinnati-metadata-v0",
			Version: "99.0.0",
		}
		data, _ = json.Marshal(releaseMetadata)
		addTarFile(tw, "release-manifests/release-metadata", data)
		return nil
	})
	if err != nil {
		return err
	}
	fr.releaseDigest = releaseDigest

	return nil
}
// newTLSServer creates (but does not start) an HTTPS test server for the
// given handler, backed by a freshly generated self-signed certificate.
func (fr *FakeOCPRegistry) newTLSServer(handler http.HandlerFunc) error {
	fr.server = httptest.NewUnstartedServer(handler)
	cert, err := fr.generateSelfSignedCert()
	if err != nil {
		// Wrap with %w (instead of %s) so callers can unwrap the cause.
		return fmt.Errorf("error configuring server cert: %w", err)
	}
	fr.server.TLS = &tls.Config{
		Certificates: []tls.Certificate{cert},
	}
	return nil
}
// Close shuts down the registry's underlying HTTP test server.
func (fr *FakeOCPRegistry) Close() {
	fr.server.Close()
}
// generateSelfSignedCert produces a throwaway RSA key pair and a matching
// self-signed x509 certificate valid for 127.0.0.1 for one hour — suitable
// only for the test server.
func (fr *FakeOCPRegistry) generateSelfSignedCert() (tls.Certificate, error) {
	var none tls.Certificate

	// Private key backing the certificate.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return none, err
	}

	// Random serial number.
	serial, err := rand.Int(rand.Reader, big.NewInt(1000000))
	if err != nil {
		return none, err
	}

	// Certificate template, self-signed below (issuer == subject).
	template := x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			Organization: []string{"Day2 AddNodes Tester & Co"},
		},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(1 * time.Hour),
		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		IPAddresses:           []net.IP{net.ParseIP("127.0.0.1")},
	}
	der, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
	if err != nil {
		return none, err
	}

	// PEM-encode both halves and build the TLS key pair.
	return tls.X509KeyPair(
		pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}),
		pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}),
	)
}
// PushImage adds an image to the registry, storing the content provided into
// a single layer. blobFn receives a tar writer to populate the layer. The
// manifest digest of the pushed image is returned; the image also becomes
// reachable via the given tag.
func (fr *FakeOCPRegistry) PushImage(tag string, blobFn func(tw *tar.Writer) error) (string, error) {
	// Create the image config. Just a few fields are required for oc commands.
	config := dockerv1client.DockerImageConfig{
		ID:           uuid.New().String(),
		Architecture: "amd64",
		OS:           "linux",
		Created:      time.Now(),
	}
	configData, err := json.Marshal(config)
	if err != nil {
		return "", err
	}
	configDigest := fr.SHA(configData)
	fr.blobs[configDigest] = configData

	// Create the image blob data, as a gzipped tar content.
	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gw)
	if err := blobFn(tw); err != nil {
		return "", err
	}
	// Check the Close errors: a failed flush here would otherwise silently
	// produce a truncated layer.
	if err := tw.Close(); err != nil {
		return "", err
	}
	if err := gw.Close(); err != nil {
		return "", err
	}
	blobData := buf.Bytes()
	blobDigest := fr.SHA(blobData)
	fr.blobs[blobDigest] = blobData

	// Create the image manifest.
	manifest := dockerv1client.DockerImageManifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config: dockerv1client.Descriptor{
			MediaType: "application/vnd.docker.container.image.v1+json",
			Digest:    configDigest,
		},
		Layers: []dockerv1client.Descriptor{
			{
				MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
				Digest:    blobDigest,
			},
		},
		Name: "ocp/release",
		Tag:  tag,
	}
	manifestData, err := json.Marshal(manifest)
	if err != nil {
		return "", err
	}
	manifestDigest := fr.SHA(manifestData)
	fr.manifests[manifestDigest] = manifestData
	fr.tags[tag] = manifestDigest

	return manifestDigest, nil
}
// SHA returns the sha256 digest of data in the "sha256:<hex>" form used to
// key the registry's blob and manifest stores.
func (fr *FakeOCPRegistry) SHA(data []byte) string {
	sum := sha256.Sum256(data)
	return "sha256:" + hex.EncodeToString(sum[:])
}