From d278296334f0431d25a52eefb975e66ece352a7d Mon Sep 17 00:00:00 2001 From: Alexandra Molnar Date: Tue, 31 Jan 2023 11:08:40 +0000 Subject: [PATCH] TELCODOCS-1110: Move LVM Storage documentation to openshift-docs --- _attributes/common-attributes.adoc | 3 + _topic_maps/_topic_map.yml | 14 +- ...agent-based-installed-cluster-for-mce.adoc | 4 +- .../planning-migration-3-4.adoc | 2 +- modules/deploying-lvms-on-sno-cluster.adoc | 37 +++ ...ing-an-logical-volume-manager-cluster.adoc | 81 +++++ ...olume-clones-in-single-node-openshift.adoc | 52 +++ ...me-snapshots-in-single-node-openshift.adoc | 42 +++ ...oned-volumes-in-single-node-openshift.adoc | 18 + ...me-snapshots-in-single-node-openshift.adoc | 30 ++ ...ms-download-log-files-and-diagnostics.adoc | 16 + ...-operator-using-openshift-web-console.adoc | 38 +++ ...l-volume-manager-operator-using-rhacm.adoc | 180 ++++++++++ ...oring-logical-volume-manager-operator.adoc | 48 +++ ...using-logical-volume-manager-operator.adoc | 70 ++++ modules/lvms-reference-file.adoc | 71 ++++ ...me-snapshots-in-single-node-openshift.adoc | 48 +++ ...g-storage-of-single-node-open-concept.adoc | 10 + ...le-node-openshift-cluster-using-rhacm.adoc | 160 +++++++++ ...rage-of-single-node-openshift-cluster.adoc | 53 +++ ...-operator-using-openshift-web-console.adoc | 26 ++ ...l-volume-manager-operator-using-rhacm.adoc | 314 ++++++++++++++++++ modules/lvms-upgrading-lvms-on-sno.adoc | 22 ++ ...olume-clones-in-single-node-openshift.adoc | 9 + ...me-snapshots-in-single-node-openshift.adoc | 19 ++ .../configuring-the-monitoring-stack.adoc | 2 +- .../ibmz-post-install.adoc | 4 +- ...andboxed-containers-1.3-release-notes.adoc | 2 +- .../persistent_storage_local/_attributes | 1 + .../persistent_storage_local/images | 1 + .../persistent_storage_local/modules | 1 + .../persistent-storage-hostpath.adoc | 0 .../persistent-storage-local.adoc | 2 +- .../persistent-storage-using-lvms.adoc | 117 +++++++ .../persistent_storage_local/snippets | 1 + 35 files changed, 1486 insertions(+), 12 deletions(-) create mode 100644 modules/deploying-lvms-on-sno-cluster.adoc create mode 100644 modules/lvms-creating-an-logical-volume-manager-cluster.adoc create mode 100644 modules/lvms-creating-volume-clones-in-single-node-openshift.adoc create mode 100644 modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc create mode 100644 modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc create mode 100644 modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc create mode 100644 modules/lvms-download-log-files-and-diagnostics.adoc create mode 100644 modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc create mode 100644 modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc create mode 100644 modules/lvms-monitoring-logical-volume-manager-operator.adoc create mode 100644 modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc create mode 100644 modules/lvms-reference-file.adoc create mode 100644 modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc create mode 100644 modules/lvms-scaling-storage-of-single-node-open-concept.adoc create mode 100644 modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc create mode 100644 modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc create mode 100644 modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc create mode 100644 
modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc create mode 100644 modules/lvms-upgrading-lvms-on-sno.adoc create mode 100644 modules/lvms-volume-clones-in-single-node-openshift.adoc create mode 100644 modules/lvms-volume-snapshots-in-single-node-openshift.adoc create mode 120000 storage/persistent_storage/persistent_storage_local/_attributes create mode 120000 storage/persistent_storage/persistent_storage_local/images create mode 120000 storage/persistent_storage/persistent_storage_local/modules rename storage/persistent_storage/{ => persistent_storage_local}/persistent-storage-hostpath.adoc (100%) rename storage/persistent_storage/{ => persistent_storage_local}/persistent-storage-local.adoc (92%) create mode 100644 storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc create mode 120000 storage/persistent_storage/persistent_storage_local/snippets diff --git a/_attributes/common-attributes.adoc b/_attributes/common-attributes.adoc index 1a550d9a24..38c75bc550 100644 --- a/_attributes/common-attributes.adoc +++ b/_attributes/common-attributes.adoc @@ -161,3 +161,6 @@ endif::[] //ifdef::openshift-origin[] //:openshift-networking: OKD Networking //endif::[] +// logical volume manager storage +:lvms-first: Logical volume manager storage (LVM Storage) +:lvms: LVM Storage \ No newline at end of file diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 4c80e03fef..7ac99b9e25 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1433,18 +1433,24 @@ Topics: File: persistent-storage-flexvolume - Name: Persistent storage using GCE Persistent Disk File: persistent-storage-gce - - Name: Persistent storage using hostPath - File: persistent-storage-hostpath - Name: Persistent Storage using iSCSI File: persistent-storage-iscsi - - Name: Persistent storage using local volumes - File: persistent-storage-local - Name: Persistent storage using NFS File: persistent-storage-nfs - Name: Persistent storage using Red Hat OpenShift Data Foundation File: persistent-storage-ocs - Name: Persistent storage using VMware vSphere File: persistent-storage-vsphere + - Name: Persistent storage using local storage + Dir: persistent_storage_local + Distros: openshift-enterprise,openshift-origin + Topics: + - Name: Persistent storage using local volumes + File: persistent-storage-local + - Name: Persistent storage using hostPath + File: persistent-storage-hostpath + - Name: Persistent storage using LVM Storage + File: persistent-storage-using-lvms - Name: Using Container Storage Interface (CSI) Dir: container_storage_interface Distros: openshift-enterprise,openshift-origin diff --git a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc b/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc index cbbbc27b77..6c96d6dfe1 100644 --- a/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc +++ b/installing/installing_with_agent_based_installer/preparing-an-agent-based-installed-cluster-for-mce.adoc @@ -12,7 +12,7 @@ The following procedure is partially automated and requires manual steps after t == Prerequisites * You have read the following documentation: ** link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html/multicluster_engine/multicluster_engine_overview[multicluster engine for Kubernetes]. 
-** xref:../../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. +** xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. ** xref:../../scalability_and_performance/ztp_far_edge/ztp-deploying-far-edge-clusters-at-scale.adoc#about-ztp_ztp-deploying-far-edge-clusters-at-scale[Using ZTP to provision clusters at the network far edge]. ** xref:../../installing/installing_with_agent_based_installer/preparing-to-install-with-agent-based-installer.adoc#preparing-to-install-with-agent-based-installer[Preparing to install with the Agent-based Installer]. ** xref:../../installing/disconnected_install/index.adoc#installing-mirroring-disconnected-about[About disconnected installation mirroring]. @@ -27,4 +27,4 @@ include::modules/preparing-an-inital-cluster-deployment-for-mce-connected.adoc[l [role="_additional-resources"] .Additional resources -* xref:../../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[The Local Storage Operator] +* xref:../../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[The Local Storage Operator] diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc index 1f334af954..494c1527b5 100644 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc @@ -84,7 +84,7 @@ Review the following storage changes to consider when transitioning from {produc Local storage is only supported by using the Local Storage Operator in {product-title} {product-version}. It is not supported to use the local provisioner method from {product-title} 3.11. -For more information, see xref:../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. +For more information, see xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Persistent storage using local volumes]. [discrete] ==== FlexVolume persistent storage diff --git a/modules/deploying-lvms-on-sno-cluster.adoc b/modules/deploying-lvms-on-sno-cluster.adoc new file mode 100644 index 0000000000..59810eb6df --- /dev/null +++ b/modules/deploying-lvms-on-sno-cluster.adoc @@ -0,0 +1,37 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: CONCEPT +[id="lvms-preface-sno-ran_{context}"] += Deploying {lvms} on {sno} clusters + +You can deploy {lvms} on a {sno} bare-metal or user-provisioned infrastructure cluster and configure it to dynamically provision storage for your workloads. + +{lvms} creates a volume group using all the available unused disks and creates a single thin pool with a size of 90% of the volume group. +The remaining 10% of the volume group is left free to enable data recovery by expanding the thin pool when required. +You might need to manually perform such recovery. + +You can use persistent volume claims (PVCs) and volume snapshots provisioned by {lvms} to request storage and create volume snapshots. + +{lvms} configures a default overprovisioning limit of 10 to take advantage of the thin-provisioning feature. 
+The total size of the volumes and volume snapshots that can be created on the {sno} clusters is 10 times the size of the thin pool. + +You can deploy {lvms} on {sno} clusters using one of the following: + +* {rh-rhacm-first} +* {product-title} Web Console + +[id="lvms-deployment-requirements-for-sno-ran_{context}"] +== Requirements + +Before you begin deploying {lvms} on {sno} clusters, ensure that the following requirements are met: + +* You have installed {rh-rhacm-first} on an {product-title} cluster. +* Every managed {sno} cluster has dedicated disks that are used to provision storage. + +Before you deploy {lvms} on {sno} clusters, be aware of the following limitations: + +* You can only create a single instance of the `LVMCluster` custom resource (CR) on an {product-title} cluster. +* You can make only a single `deviceClass` entry in the `LVMCluster` CR. +* When a device becomes part of the `LVMCluster` CR, it cannot be removed. \ No newline at end of file diff --git a/modules/lvms-creating-an-logical-volume-manager-cluster.adoc b/modules/lvms-creating-an-logical-volume-manager-cluster.adoc new file mode 100644 index 0000000000..77e44849a2 --- /dev/null +++ b/modules/lvms-creating-an-logical-volume-manager-cluster.adoc @@ -0,0 +1,81 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-creating-lvms-cluster_{context}"] += Creating a Logical Volume Manager cluster + +You can create a Logical Volume Manager cluster after you install {lvms}. + +{product-title} supports additional worker nodes for {sno} clusters on bare-metal user-provisioned infrastructure. +{lvms} detects and uses the additional worker nodes when the new nodes show up. +If you need to set a node filter for the additional worker nodes, you can use the YAML view while creating the cluster. + +[IMPORTANT] +==== +This node filter matching is not the same as the pod label matching. +==== + +.Prerequisites + +* You installed {lvms} from the OperatorHub. + +.Procedure + +. In the {product-title} Web Console, click *Operators → Installed Operators* to view all the installed Operators. ++ +Ensure that the *Project* selected is `openshift-storage`. + +. Click on *LVM Storage*, and then click *Create LVMCluster* under *LVMCluster*. +. In the *Create LVMCluster* page, select either *Form view* or *YAML view*. +. Enter a name for the cluster. +. Click *Create*. +. Optional: To add a node filter, click *YAML view* and specify the filter in the `nodeSelector` section: ++ +[source,yaml] +---- +apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: my-lvmcluster +spec: + storage: + deviceClasses: + - name: vg1 + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 + nodeSelector: + nodeSelectorTerms: + - matchExpressions: + - key: app + operator: In + values: + - test1 +---- + +. Optional: To edit the local device path of the disks, click *YAML view* and specify the device path in the `deviceSelector` section: ++ +[source,yaml] +---- +spec: + storage: + deviceClasses: + - name: vg1 + deviceSelector: + paths: + - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 +---- + +.Verification steps + +. Click *Storage -> Storage Classes* from the left pane of the {product-title} Web Console. + +. 
Verify that the `lvms-<device-class-name>` storage class is created with the `LVMCluster` creation. By default, `vg1` is the `device-class-name`. \ No newline at end of file diff --git a/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc b/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc new file mode 100644 index 0000000000..50ed308409 --- /dev/null +++ b/modules/lvms-creating-volume-clones-in-single-node-openshift.adoc @@ -0,0 +1,52 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-creating-volume-clones-in-single-node-openshift_{context}"] += Creating volume clones in {sno} + +You create a clone of a volume to make a point-in-time copy of the data. +A persistent volume claim (PVC) cannot be cloned with a different size. + +[IMPORTANT] +==== +The cloned PVC has write access. +==== + +.Prerequisites + +* You ensured that the PVC is in `Bound` state. This is required for a consistent snapshot. +* You ensured that the `StorageClass` is the same as that of the source PVC. + +.Procedure + +. Identify the storage class of the source PVC. +. To create a volume clone, save the following YAML to a file with a name such as `lvms-vol-clone.yaml`: ++ +.Example YAML to clone a volume +[source,yaml] +---- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-block-1-clone +spec: + storageClassName: lvms-vg1 + dataSource: + name: lvm-block-1 + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + volumeMode: Block + resources: + requests: + storage: 2Gi +---- + +. Create the PVC in the same namespace as the source PVC by running the following command: ++ +[source,terminal] +---- +# oc create -f lvms-vol-clone.yaml +---- \ No newline at end of file diff --git a/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc new file mode 100644 index 0000000000..5445e38d9a --- /dev/null +++ b/modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-creating-volume-snapshots-in-single-node-openshift_{context}"] += Creating volume snapshots in {sno} + +You can create volume snapshots based on the available capacity of the thin pool and the overprovisioning limits. +{lvms} creates a `VolumeSnapshotClass` with the `lvms-<device-class-name>` name. + +.Prerequisites + +* You ensured that the persistent volume claim (PVC) is in `Bound` state. This is required for a consistent snapshot. +* You stopped all the I/O to the PVC before taking the snapshot. + +.Procedure + +. Log in to the {sno} cluster for which you need to run the `oc` command. +. Save the following YAML to a file with a name such as `lvms-vol-snapshot.yaml`. ++ +.Example YAML to create a volume snapshot +[source,yaml] +---- +apiVersion: snapshot.storage.k8s.io/v1 +kind: VolumeSnapshot +metadata: + name: lvm-block-1-snap +spec: + volumeSnapshotClassName: lvms-vg1 + source: + persistentVolumeClaimName: lvm-block-1 +---- + +. Create the snapshot by running the following command in the same namespace as the PVC: ++ +[source,terminal] +---- +# oc create -f lvms-vol-snapshot.yaml +---- + +A read-only copy of the PVC is created as a volume snapshot. 
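+
+After you create the snapshot, you can optionally confirm that it is ready to use by checking the `READYTOUSE` field of the `VolumeSnapshot` resource. The following command is a sketch: the snapshot name is taken from the preceding example, and `<namespace>` is a placeholder for the namespace of the source PVC:
+
+[source,terminal]
+----
+# oc get volumesnapshot lvm-block-1-snap -n <namespace>
+----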
\ No newline at end of file diff --git a/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc b/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc new file mode 100644 index 0000000000..598f407529 --- /dev/null +++ b/modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc @@ -0,0 +1,18 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-deleting-cloned-volumes-in-single-node-openshift_{context}"] += Deleting cloned volumes in {sno} + +You can delete cloned volumes. + +.Procedure + +* To delete the cloned volume, delete the cloned PVC by running the following command: ++ +[source,terminal] +---- +# oc delete pvc <clone_pvc_name> -n <namespace> +---- \ No newline at end of file diff --git a/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc new file mode 100644 index 0000000000..f553c6cf97 --- /dev/null +++ b/modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc @@ -0,0 +1,30 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-deleting-volume-snapshots-in-single-node-openshift_{context}"] += Deleting volume snapshots in {sno} + +You can delete volume snapshot resources and persistent volume claims (PVCs). + +.Procedure + +. Delete the volume snapshot resource by running the following command: ++ +[source,terminal] +---- +# oc delete volumesnapshot <volume_snapshot_name> -n <namespace> +---- ++ +[NOTE] +==== +When you delete a persistent volume claim (PVC), the snapshots of the PVC are not deleted. +==== + +. To delete the restored volume snapshot, delete the PVC that was created to restore the volume snapshot by running the following command: ++ +[source,terminal] +---- +# oc delete pvc <restored_pvc_name> -n <namespace> +---- \ No newline at end of file diff --git a/modules/lvms-download-log-files-and-diagnostics.adoc b/modules/lvms-download-log-files-and-diagnostics.adoc new file mode 100644 index 0000000000..e4b5055d09 --- /dev/null +++ b/modules/lvms-download-log-files-and-diagnostics.adoc @@ -0,0 +1,16 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-dowloading-log-files-and-diagnostics_{context}"] += Downloading log files and diagnostic information using must-gather + +When {lvms} is unable to automatically resolve a problem, use the must-gather tool to collect the log files and diagnostic information so that you or Red Hat Support can review the problem and determine a solution. 
+ +* Run the must-gather command from a client connected to the {lvms} cluster: ++ +[source,terminal] +---- +$ oc adm must-gather --image=registry.redhat.io/odf4/ocs-must-gather-rhel8:v4.12 --dest-dir=<directory_name> +---- \ No newline at end of file diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc new file mode 100644 index 0000000000..75ed3c4164 --- /dev/null +++ b/modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc @@ -0,0 +1,38 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-installing-lvms-with-web-console_{context}"] += Installing {lvms} using {product-title} Web Console + +You can install {lvms} using the Red Hat {product-title} OperatorHub. + +.Prerequisites + +* You have access to the {sno} cluster. +* You are using an account with `cluster-admin` and Operator installation permissions. + +.Procedure + +. Log in to the {product-title} Web Console. +. Click *Operators -> OperatorHub*. +. Scroll or type `LVM Storage` into the *Filter by keyword* box to find {lvms}. +. Click *Install*. +. Set the following options on the *Install Operator* page: +.. *Update Channel* as *stable-{product-version}*. +.. *Installation Mode* as *A specific namespace on the cluster*. +.. *Installed Namespace* as *Operator recommended namespace openshift-storage*. + If the `openshift-storage` namespace does not exist, it is created during the Operator installation. +.. *Approval Strategy* as *Automatic* or *Manual*. ++ +If you select *Automatic* updates, then the Operator Lifecycle Manager (OLM) automatically upgrades the running instance of your Operator without any intervention. ++ +If you select *Manual* updates, then the OLM creates an update request. +As a cluster administrator, you must then manually approve that update request to update the Operator to a newer version. + +. Click *Install*. + +.Verification steps + +* Verify that {lvms} shows a green tick, indicating successful installation. \ No newline at end of file diff --git a/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc b/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc new file mode 100644 index 0000000000..ff5c1cd13e --- /dev/null +++ b/modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc @@ -0,0 +1,180 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-installing-odf-logical-volume-manager-operator-using-rhacm_{context}"] += Installing {lvms} using {rh-rhacm} + +{lvms} is deployed on {sno} clusters using {rh-rhacm-first}. +You create a `Policy` object on {rh-rhacm} that deploys and configures the Operator when it is applied to managed clusters which match the selector specified in the `PlacementRule` resource. +The policy is also applied to clusters that are imported later and satisfy the placement rule. + +.Prerequisites +* Access to the {rh-rhacm} cluster using an account with `cluster-admin` and Operator installation permissions. +* Dedicated disks on each {sno} cluster to be used by {lvms}. +* The {sno} cluster needs to be managed by {rh-rhacm}, either imported or created. + +.Procedure + +. 
Log in to the {rh-rhacm} CLI using your {product-title} credentials. + +. Create a namespace in which you will create policies. ++ +[source,terminal] +---- +# oc create ns lvms-policy-ns +---- + +. To create a policy, save the following YAML to a file with a name such as `policy-lvms-operator.yaml`: ++ +[source,yaml] +---- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: placement-install-lvms +spec: + clusterConditions: + - status: "True" + type: ManagedClusterConditionAvailable + clusterSelector: <1> + matchExpressions: + - key: mykey + operator: In + values: + - myvalue +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-install-lvms +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-install-lvms +subjects: +- apiGroup: policy.open-cluster-management.io + kind: Policy + name: install-lvms +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + annotations: + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration + policy.open-cluster-management.io/standards: NIST SP 800-53 + name: install-lvms +spec: + disabled: false + remediationAction: enforce + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: install-lvms + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: Namespace + metadata: + labels: + openshift.io/cluster-monitoring: "true" + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + name: openshift-storage + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: openshift-storage-operatorgroup + namespace: openshift-storage + spec: + targetNamespaces: + - openshift-storage + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: lvms + namespace: openshift-storage + spec: + installPlanApproval: Automatic + name: lvms-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + remediationAction: enforce + severity: low + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: lvms + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: lvm.topolvm.io/v1alpha1 + kind: LVMCluster + metadata: + name: my-lvmcluster + namespace: openshift-storage + spec: + storage: + deviceClasses: + - name: vg1 + deviceSelector: <2> + paths: + - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 + nodeSelector: <3> + nodeSelectorTerms: + - matchExpressions: + - key: app + operator: In + values: + - test1 + remediationAction: enforce + severity: low +---- +<1> Replace the key and value in `PlacementRule.spec.clusterSelector` to match the labels set on the {sno} clusters on which you want to install {lvms}. +<2> To control or restrict the volume group to your preferred disks, you can manually specify the local paths of the disks in the `deviceSelector` section of the `LVMCluster` YAML. 
+<3> To add a node filter, which is a subset of the additional worker nodes, specify the required filter in the `nodeSelector` section. {lvms} detects and uses the additional worker nodes when the new nodes show up. ++ +-- +[IMPORTANT] +==== +This `nodeSelector` node filter matching is not the same as the pod label matching. +==== +-- + +. Create the policy in the namespace by running the following command: ++ +[source,terminal] +---- +# oc create -f policy-lvms-operator.yaml -n lvms-policy-ns <1> +---- +<1> The `policy-lvms-operator.yaml` is the name of the file to which the policy is saved. + ++ +This creates a `Policy`, a `PlacementRule`, and a `PlacementBinding` object in the `lvms-policy-ns` namespace. +The policy creates a `Namespace`, `OperatorGroup`, `Subscription`, and `LVMCluster` resource on the clusters that match the placement rule. +This deploys the Operator on the {sno} clusters which match the selection criteria and configures it to set up the required resources to provision storage. +The Operator uses all the disks specified in the `LVMCluster` CR. +If no disks are specified, the Operator uses all the unused disks on the {sno} node. ++ +[IMPORTANT] +==== +After a device is added to the `LVMCluster`, it cannot be removed. +==== \ No newline at end of file diff --git a/modules/lvms-monitoring-logical-volume-manager-operator.adoc b/modules/lvms-monitoring-logical-volume-manager-operator.adoc new file mode 100644 index 0000000000..b53405cf92 --- /dev/null +++ b/modules/lvms-monitoring-logical-volume-manager-operator.adoc @@ -0,0 +1,48 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-monitoring-using-lvms_{context}"] += Monitoring {lvms} + +When {lvms} is installed using the {product-title} Web Console, you can monitor the cluster by using the *Block and File* dashboard in the console by default. +However, when you use {rh-rhacm} to install {lvms}, you need to configure {rh-rhacm} Observability to monitor all the {sno} clusters from one place. + +[id="lvms-monitoring-using-lvms-metrics_{context}"] +== Metrics + +You can monitor {lvms} by viewing the metrics exported by the Operator on the {rh-rhacm} dashboards and the alerts that are triggered. + +* Add the following `topolvm` metrics to the `allow` list: ++ +[source,terminal] +---- +topolvm_thinpool_data_percent +topolvm_thinpool_metadata_percent +topolvm_thinpool_size_bytes +---- + +[NOTE] +==== +Metrics are updated every 10 minutes or when there is a change in the thin pool, such as a new logical volume creation. +==== + +[id="lvms-monitoring-using-lvms-alerts_{context}"] +== Alerts + +When the thin pool and volume group are filled up, further operations fail and might lead to data loss. +{lvms} sends the following alerts about the usage of the thin pool and volume group when utilization crosses a certain value: + +.Alerts for Logical Volume Manager cluster in {rh-rhacm} +[[alerts_for_LVMCluster_in_{rh-rhacm}]] +[%autowidth,frame="topbot",options="header"] +|=== +|Alert| Description +|`VolumeGroupUsageAtThresholdNearFull`|This alert is triggered when both the volume group and thin pool utilization cross 75% on nodes. Data deletion or volume group expansion is required. +|`VolumeGroupUsageAtThresholdCritical`|This alert is triggered when both the volume group and thin pool utilization cross 85% on nodes. `VolumeGroup` is critically full. Data deletion or volume group expansion is required. 
+|`ThinPoolDataUsageAtThresholdNearFull`|This alert is triggered when the thin pool data utilization in the volume group crosses 75% on nodes. Data deletion or thin pool expansion is required. +|`ThinPoolDataUsageAtThresholdCritical`|This alert is triggered when the thin pool data utilization in the volume group crosses 85% on nodes. Data deletion or thin pool expansion is required. +|`ThinPoolMetaDataUsageAtThresholdNearFull`|This alert is triggered when the thin pool metadata utilization in the volume group crosses 75% on nodes. Data deletion or thin pool expansion is required. +|`ThinPoolMetaDataUsageAtThresholdCritical`|This alert is triggered when the thin pool metadata utilization in the volume group crosses 85% on nodes. Data deletion or thin pool expansion is required. +|=== \ No newline at end of file diff --git a/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc b/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc new file mode 100644 index 0000000000..ba8d6630f0 --- /dev/null +++ b/modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc @@ -0,0 +1,70 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-provisioning-storage-using-lvms_{context}"] += Provisioning storage using {lvms} + +You can provision persistent volume claims (PVCs) using the storage class that is created during the Operator installation. You can provision block and file PVCs; however, the storage is allocated only when a pod that uses the PVC is created. + +[NOTE] +==== +{lvms} provisions PVCs in units of 1 GiB. The requested storage is rounded up to the nearest GiB. +==== + +.Procedure + +. Identify the `StorageClass` that is created when {lvms} is deployed. ++ +The `StorageClass` name is in the format `lvms-<device-class-name>`. +The `device-class-name` is the name of the device class that you provided in the `LVMCluster` of the `Policy` YAML. +For example, if the `deviceClass` is called `vg1`, then the `storageClass` name is `lvms-vg1`. ++ +The `volumeBindingMode` of the storage class is set to `WaitForFirstConsumer`. + +. To create a PVC where the application requires storage, save the following YAML to a file with a name such as `pvc.yaml`. ++ +.Example YAML to create a PVC +[source,yaml] +---- +# block pvc +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-block-1 + namespace: default +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + resources: + requests: + storage: 10Gi + storageClassName: lvms-vg1 +--- +# file pvc +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: lvm-file-1 + namespace: default +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 10Gi + storageClassName: lvms-vg1 +---- + +. Create the PVC by running the following command: ++ +[source,terminal] +---- +# oc create -f pvc.yaml -n <namespace> +---- + ++ +The created PVCs remain in `Pending` state until you deploy the pods that use them. 
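+
+The following pod definition is a minimal sketch that shows how a workload can consume the file PVC from the preceding example so that the PVC binds and the storage is allocated. The pod name and container image are illustrative assumptions and are not part of the original example:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Pod
+metadata:
+  name: app-file-1 # assumed example name
+  namespace: default
+spec:
+  containers:
+  - name: app
+    image: registry.access.redhat.com/ubi8/ubi-minimal # assumed example image
+    command: ["sleep", "infinity"]
+    volumeMounts:
+    - mountPath: /data
+      name: lvm-file-vol
+  volumes:
+  - name: lvm-file-vol
+    persistentVolumeClaim:
+      claimName: lvm-file-1 # file PVC from the example above
+----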
\ No newline at end of file diff --git a/modules/lvms-reference-file.adoc b/modules/lvms-reference-file.adoc new file mode 100644 index 0000000000..2308a359c2 --- /dev/null +++ b/modules/lvms-reference-file.adoc @@ -0,0 +1,71 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: REFERENCE +[id="lvms-reference-file_{context}"] += {lvms} reference YAML file + +The sample `LVMCluster` custom resource (CR) describes all the fields in the YAML file. + +.Example LVMCluster CR +[source,yaml] +---- +apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: my-lvmcluster +spec: + tolerations: + - effect: NoSchedule + key: xyz + operator: Equal + value: "true" + storage: + deviceClasses: <1> + - name: vg1 <2> + nodeSelector: <3> + nodeSelectorTerms: <4> + - matchExpressions: + - key: mykey + operator: In + values: + - ssd + deviceSelector: <5> + paths: + - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 + thinPoolConfig: <6> + name: thin-pool-1 <7> + sizePercent: 90 <8> + overprovisionRatio: 10 <9> +status: + deviceClassStatuses: <10> + - name: vg1 + nodeStatus: <11> + - devices: <12> + - /dev/nvme0n1 + - /dev/nvme1n1 + - /dev/nvme2n1 + node: my-node.example.com <13> + status: Ready <14> + ready: true <15> + state: Ready <16> +---- +<1> The LVM volume groups to be created on the cluster. Currently, only a single `deviceClass` is supported. +<2> The name of the LVM volume group to be created on the nodes. +<3> The nodes on which to create the LVM volume group. If the field is empty, all nodes are considered. +<4> A list of node selector requirements. +<5> A list of device paths which is used to create the LVM volume group. If this field is empty, all unused disks on the node will be used. +<6> The LVM thin pool configuration. +<7> The name of the thin pool to be created in the LVM volume group. +<8> The percentage of remaining space in the LVM volume group that should be used for creating the thin pool. +<9> The factor by which additional storage can be provisioned compared to the available storage in the thin pool. +<10> The status of the `deviceClass`. +<11> The status of the LVM volume group on each node. +<12> The list of devices used to create the LVM volume group. +<13> The node on which the `deviceClass` was created. +<14> The status of the LVM volume group on the node. +<15> This field is deprecated. +<16> The status of the `LVMCluster`. \ No newline at end of file diff --git a/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc new file mode 100644 index 0000000000..f494ad4cec --- /dev/null +++ b/modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc @@ -0,0 +1,48 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-restoring-volume-snapshots-in-single-node-openshift_{context}"] += Restoring volume snapshots in {sno} + +When you restore a volume snapshot, a new persistent volume claim (PVC) is created. +The restored PVC is independent of the volume snapshot and the source PVC. + +.Prerequisites + +* The storage class must be the same as that of the source PVC. 
+* The size of the requested PVC must be the same as that of the source volume of the snapshot. + +.Procedure + +. Identify the storage class name of the source PVC and the volume snapshot name. +. Save the following YAML to a file with a name such as `lvms-vol-restore.yaml` to restore the snapshot. ++ +.Example YAML to restore a PVC +[source,yaml] +---- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: lvm-block-1-restore +spec: + accessModes: + - ReadWriteOnce + volumeMode: Block + resources: + requests: + storage: 2Gi + storageClassName: lvms-vg1 + dataSource: + name: lvm-block-1-snap + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io +---- + +. Create the PVC by running the following command in the same namespace as the snapshot: ++ +[source,terminal] +---- +# oc create -f lvms-vol-restore.yaml +---- \ No newline at end of file diff --git a/modules/lvms-scaling-storage-of-single-node-open-concept.adoc b/modules/lvms-scaling-storage-of-single-node-open-concept.adoc new file mode 100644 index 0000000000..ea46fc692f --- /dev/null +++ b/modules/lvms-scaling-storage-of-single-node-open-concept.adoc @@ -0,0 +1,10 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: CONCEPT +[id="lvms-scaling-storage-of-single-node-openshift-cluster-con_{context}"] += Scaling storage of {sno} clusters + +{product-title} supports additional worker nodes for {sno} clusters on bare-metal user-provisioned infrastructure. +{lvms} detects and uses the additional worker nodes when the new nodes show up. \ No newline at end of file diff --git a/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc b/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc new file mode 100644 index 0000000000..3f436f2f73 --- /dev/null +++ b/modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc @@ -0,0 +1,160 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm_{context}"] += Scaling up storage by adding capacity to your {sno} cluster using {rh-rhacm} + +You can scale the storage capacity of your configured worker nodes on a {sno} cluster using {rh-rhacm}. + +.Prerequisites + +* You have access to the {rh-rhacm} cluster using an account with `cluster-admin` privileges. +* You have additional unused disks on each {sno} cluster to be used by {lvms}. + +.Procedure + +. Log in to the {rh-rhacm} CLI using your {product-title} credentials. +. Find the disk that you want to add. The disk to be added must match the device name and path of the existing disks. +. To add capacity to the {sno} cluster, edit the `deviceSelector` section of the existing policy YAML, for example, `policy-lvms-operator.yaml`. + ++ +[NOTE] +==== +If the `deviceSelector` field was not included when the `LVMCluster` was created, it is not possible to add the `deviceSelector` section to the CR. +You must remove the `LVMCluster` and then recreate it from the new CR. 
+==== + ++ +[source,yaml] +---- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: placement-install-lvms +spec: + clusterConditions: + - status: "True" + type: ManagedClusterConditionAvailable + clusterSelector: + matchExpressions: + - key: mykey + operator: In + values: + - myvalue +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-install-lvms +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-install-lvms +subjects: +- apiGroup: policy.open-cluster-management.io + kind: Policy + name: install-lvms +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + annotations: + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration + policy.open-cluster-management.io/standards: NIST SP 800-53 + name: install-lvms +spec: + disabled: false + remediationAction: enforce + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: install-lvms + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: Namespace + metadata: + labels: + openshift.io/cluster-monitoring: "true" + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/warn: privileged + name: openshift-storage + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: openshift-storage-operatorgroup + namespace: openshift-storage + spec: + targetNamespaces: + - openshift-storage + - complianceType: musthave + objectDefinition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: lvms + namespace: openshift-storage + spec: + installPlanApproval: Automatic + name: lvms-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + remediationAction: enforce + severity: low + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: lvms + spec: + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: lvm.topolvm.io/v1alpha1 + kind: LVMCluster + metadata: + name: my-lvmcluster + namespace: openshift-storage + spec: + storage: + deviceClasses: + - name: vg1 + deviceSelector: + paths: + - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 # new disk is added + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 + nodeSelector: + nodeSelectorTerms: + - matchExpressions: + - key: app + operator: In + values: + - test1 + remediationAction: enforce + severity: low +---- + +. Edit the policy by running the following command: ++ +[source,terminal] +---- +# oc edit -f policy-lvms-operator.yaml -ns lvms-policy-ns <1> +---- +<1> The `policy-lvms-operator.yaml` is the name of the existing policy. ++ +This uses the new disk specified in the `LVMCluster` CR to provision storage. 
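+
+One way to confirm that the added disk was picked up is to check the `LVMCluster` status on the managed cluster. The following command is a sketch that assumes the CR name `my-lvmcluster` from the example policy; the new device should appear under `status.deviceClassStatuses`, as described in the reference YAML file:
+
+[source,terminal]
+----
+# oc get lvmcluster my-lvmcluster -n openshift-storage -o yaml
+----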
\ No newline at end of file diff --git a/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc b/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc new file mode 100644 index 0000000000..1f5ef432c2 --- /dev/null +++ b/modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc @@ -0,0 +1,53 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-scaling-storage-of-single-node-openshift-cluster_{context}"] += Scaling up storage by adding capacity to your {sno} cluster + +To scale the storage capacity of your configured worker nodes on a {sno} cluster, you can increase the capacity by adding disks. + +.Prerequisites + +* You have additional unused disks on each {sno} cluster to be used by {lvms}. + +.Procedure + +. Log in to the {product-title} Web Console of the {sno} cluster. +. From the *Operators* -> *Installed Operators* page, click on the *LVM Storage Operator* in the `openshift-storage` namespace. +. Click on the *LVMCluster* tab to list the `LVMCluster` CR created on the cluster. +. Select *Edit LVMCluster* from the *Actions* drop-down menu. +. Click on the *YAML* tab. +. Edit the `LVMCluster` CR YAML to add the new device path in the `deviceSelector` section: + ++ +[NOTE] +==== +If the `deviceSelector` field was not included when the `LVMCluster` was created, it is not possible to add the `deviceSelector` section to the CR. +You must remove the `LVMCluster` and then create a new CR. +==== + ++ +[source,yaml] +---- +apiVersion: lvm.topolvm.io/v1alpha1 +kind: LVMCluster +metadata: + name: my-lvmcluster +spec: + storage: + deviceClasses: + - name: vg1 + deviceSelector: + paths: + - /dev/disk/by-path/pci-0000:87:00.0-nvme-1 <1> + - /dev/disk/by-path/pci-0000:88:00.0-nvme-1 + - /dev/disk/by-path/pci-0000:89:00.0-nvme-1 <2> + thinPoolConfig: + name: thin-pool-1 + sizePercent: 90 + overprovisionRatio: 10 +---- +<1> The path can be specified by device name, such as `/dev/sdb`, or by device path. +<2> A new disk is added. \ No newline at end of file diff --git a/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc b/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc new file mode 100644 index 0000000000..18d6507bc7 --- /dev/null +++ b/modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc @@ -0,0 +1,26 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-unstalling-lvms-with-web-console_{context}"] += Uninstalling {lvms} installed using the OpenShift Web Console + +You can uninstall {lvms} using the {product-title} Web Console. + +.Prerequisites + +* You deleted all the applications on the clusters that are using the storage provisioned by {lvms}. +* You deleted the persistent volume claims (PVCs) and persistent volumes (PVs) provisioned using {lvms}. +* You deleted all volume snapshots provisioned by {lvms}. +* You verified that no logical volume resources exist by using the `oc get logicalvolume` command. +* You have access to the {sno} cluster using an account with `cluster-admin` permissions. + +.Procedure + +. From the *Operators* → *Installed Operators* page, scroll to *LVM Storage*, or type `LVM Storage` into the *Filter by name* box to find it, and then click on it. +. Click on the *LVMCluster* tab. 
+. On the right-hand side of the *LVMCluster* page, select *Delete LVMCluster* from the *Actions* drop-down menu. +. Click on the *Details* tab. +. On the right-hand side of the *Operator Details* page, select *Uninstall Operator* from the *Actions* drop-down menu. +. Select *Remove*. {lvms} stops running and is completely removed. \ No newline at end of file diff --git a/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc b/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc new file mode 100644 index 0000000000..a1b84ea727 --- /dev/null +++ b/modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc @@ -0,0 +1,314 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-uninstalling-lvms-rhacm_{context}"] += Uninstalling {lvms} installed using {rh-rhacm} + +To uninstall {lvms} that you installed using {rh-rhacm}, you need to delete the {rh-rhacm} policy that you created for deploying and configuring the Operator. + +When you delete the {rh-rhacm} policy, the resources that the policy has created are not removed. +You need to create additional policies to remove the resources. + +As the created resources are not removed when you delete the policy, you need to perform the following steps: + +. Remove all the Persistent volume claims (PVCs) and volume snapshots provisioned by {lvms}. +. Remove the `LVMCluster` resources to clean up Logical Volume Manager resources created on the disks. +. Create an additional policy to uninstall the Operator. + +.Prerequisites + +* Ensure that the following are deleted before deleting the policy: +** All the applications on the managed clusters that are using the storage provisioned by {lvms}. +** PVCs and persistent volumes (PVs) provisioned using {lvms}. +** All volume snapshots provisioned by {lvms}. +* Ensure you have access to the {rh-rhacm} cluster using an account with a `cluster-admin` role. + +.Procedure + +. In the OpenShift CLI (`oc`), delete the {rh-rhacm} policy that you created for deploying and configuring {lvms} on the hub cluster by using the following command: ++ +[source,terminal] +---- +# oc delete -f policy-lvms-operator.yaml -n lvms-policy-ns <1> +---- +<1> The `policy-lvms-operator.yaml` is the name of the file to which the policy was saved. + +. To create a policy for removing the `LVMCluster` resource, save the following YAML to a file with a name such as `lvms-remove-policy.yaml`. +This enables the Operator to clean up all Logical Volume Manager resources that it created on the cluster. 
++ +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + name: policy-lvmcluster-delete + annotations: + policy.open-cluster-management.io/standards: NIST SP 800-53 + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration +spec: + remediationAction: enforce + disabled: false + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: policy-lvmcluster-removal + spec: + remediationAction: enforce <1> + severity: low + object-templates: + - complianceType: mustnothave + objectDefinition: + kind: LVMCluster + apiVersion: lvm.topolvm.io/v1alpha1 + metadata: + name: my-lvmcluster + namespace: openshift-storage <2> +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-policy-lvmcluster-delete +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-policy-lvmcluster-delete +subjects: + - apiGroup: policy.open-cluster-management.io + kind: Policy + name: policy-lvmcluster-delete +--- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: placement-policy-lvmcluster-delete +spec: + clusterConditions: + - status: "True" + type: ManagedClusterConditionAvailable + clusterSelector: + matchExpressions: + - key: mykey + operator: In + values: + - myvalue +---- +<1> The `policy-template` `spec.remediationAction` is overridden by the preceding parameter value for `spec.remediationAction`. +<2> This `namespace` field must have the `openshift-storage` value. + +. Set the value of the `PlacementRule.spec.clusterSelector` field to select the clusters from which to uninstall {lvms}. + +. Create the policy by running the following command: ++ +[source,terminal] +---- +# oc create -f lvms-remove-policy.yaml -n lvms-policy-ns +---- + +. 
To create a policy to check if the `LVMCluster` CR has been removed, save the following YAML to a file with a name such as `check-lvms-remove-policy.yaml`: ++ +[source,yaml] +---- +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + name: policy-lvmcluster-inform + annotations: + policy.open-cluster-management.io/standards: NIST SP 800-53 + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration +spec: + remediationAction: inform + disabled: false + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: policy-lvmcluster-removal-inform + spec: + remediationAction: inform <1> + severity: low + object-templates: + - complianceType: mustnothave + objectDefinition: + kind: LVMCluster + apiVersion: lvm.topolvm.io/v1alpha1 + metadata: + name: my-lvmcluster + namespace: openshift-storage <2> +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-policy-lvmcluster-check +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-policy-lvmcluster-check +subjects: + - apiGroup: policy.open-cluster-management.io + kind: Policy + name: policy-lvmcluster-inform +--- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: placement-policy-lvmcluster-check +spec: + clusterConditions: + - status: "True" + type: ManagedClusterConditionAvailable + clusterSelector: + matchExpressions: + - key: mykey + operator: In + values: + - myvalue +---- +<1> The `policy-template` `spec.remediationAction` is overridden by the preceding parameter value for `spec.remediationAction`. +<2> The `namespace` field must have the `openshift-storage` value. + +. Create the policy by running the following command: ++ +[source,terminal] +---- +# oc create -f check-lvms-remove-policy.yaml -n lvms-policy-ns +---- + +. Check the policy status by running the following command: ++ +[source,terminal] +---- +# oc get policy -n lvms-policy-ns +---- + ++ +.Example output +[source,terminal] +---- +NAME REMEDIATION ACTION COMPLIANCE STATE AGE +policy-lvmcluster-delete enforce Compliant 15m +policy-lvmcluster-inform inform Compliant 15m +---- + +. After both the policies are compliant, save the following YAML to a file with a name such as `lvms-uninstall-policy.yaml` to create a policy to uninstall {lvms}. 
++ +[source,yaml] +---- +apiVersion: apps.open-cluster-management.io/v1 +kind: PlacementRule +metadata: + name: placement-uninstall-lvms +spec: + clusterConditions: + - status: "True" + type: ManagedClusterConditionAvailable + clusterSelector: + matchExpressions: + - key: mykey + operator: In + values: + - myvalue +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: PlacementBinding +metadata: + name: binding-uninstall-lvms +placementRef: + apiGroup: apps.open-cluster-management.io + kind: PlacementRule + name: placement-uninstall-lvms +subjects: +- apiGroup: policy.open-cluster-management.io + kind: Policy + name: uninstall-lvms +--- +apiVersion: policy.open-cluster-management.io/v1 +kind: Policy +metadata: + annotations: + policy.open-cluster-management.io/categories: CM Configuration Management + policy.open-cluster-management.io/controls: CM-2 Baseline Configuration + policy.open-cluster-management.io/standards: NIST SP 800-53 + name: uninstall-lvms +spec: + disabled: false + policy-templates: + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: uninstall-lvms + spec: + object-templates: + - complianceType: mustnothave + objectDefinition: + apiVersion: v1 + kind: Namespace + metadata: + name: openshift-storage + - complianceType: mustnothave + objectDefinition: + apiVersion: operators.coreos.com/v1 + kind: OperatorGroup + metadata: + name: openshift-storage-operatorgroup + namespace: openshift-storage + spec: + targetNamespaces: + - openshift-storage + - complianceType: mustnothave + objectDefinition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: lvms-operator + namespace: openshift-storage + remediationAction: enforce + severity: low + - objectDefinition: + apiVersion: policy.open-cluster-management.io/v1 + kind: ConfigurationPolicy + metadata: + name: policy-remove-lvms-crds + spec: + object-templates: + - complianceType: mustnothave + objectDefinition: + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: logicalvolumes.topolvm.io + - complianceType: mustnothave + objectDefinition: + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: lvmclusters.lvm.topolvm.io + - complianceType: mustnothave + objectDefinition: + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: lvmvolumegroupnodestatuses.lvm.topolvm.io + - complianceType: mustnothave + objectDefinition: + apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + name: lvmvolumegroups.lvm.topolvm.io + remediationAction: enforce + severity: high +---- + +. Create the policy by running the following command: ++ +[source,terminal] +---- +# oc create -f lvms-uninstall-policy.yaml -ns lvms-policy-ns +---- \ No newline at end of file diff --git a/modules/lvms-upgrading-lvms-on-sno.adoc b/modules/lvms-upgrading-lvms-on-sno.adoc new file mode 100644 index 0000000000..105cffccb8 --- /dev/null +++ b/modules/lvms-upgrading-lvms-on-sno.adoc @@ -0,0 +1,22 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: PROCEDURE +[id="lvms-upgrading-lvms-on-sno_{context}"] += Upgrading {lvms} on {sno} clusters + +Currently, it is not possible to upgrade from {rh-storage} Logical Volume Manager Operator 4.11 to {lvms} 4.12 on {sno} clusters. 
+ +[IMPORTANT] +==== +The data will not be preserved during this process. +==== + +.Procedure + +. Back up any data that you want to preserve on the persistent volume claims (PVCs). +. Delete all PVCs provisioned by the {rh-storage} Logical Volume Manager Operator and their pods. +. Reinstall {lvms} on {product-title} 4.12. +. Recreate the workloads. +. Copy the backup data to the PVCs created after upgrading to 4.12. \ No newline at end of file diff --git a/modules/lvms-volume-clones-in-single-node-openshift.adoc b/modules/lvms-volume-clones-in-single-node-openshift.adoc new file mode 100644 index 0000000000..44bfc5d224 --- /dev/null +++ b/modules/lvms-volume-clones-in-single-node-openshift.adoc @@ -0,0 +1,9 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: CONCEPT +[id="lvms-volume-cloning-for-single-node-openshift-cluster_{context}"] += Volume cloning for {sno} + +A clone is a duplicate of an existing storage volume that can be used like any standard volume. \ No newline at end of file diff --git a/modules/lvms-volume-snapshots-in-single-node-openshift.adoc b/modules/lvms-volume-snapshots-in-single-node-openshift.adoc new file mode 100644 index 0000000000..188216ca7d --- /dev/null +++ b/modules/lvms-volume-snapshots-in-single-node-openshift.adoc @@ -0,0 +1,19 @@ +// Module included in the following assemblies: +// +// storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc + +:_content-type: CONCEPT +[id="lvms-volume-snapshot-for-sno_{context}"] += Volume snapshots for {sno} + +You can take volume snapshots of persistent volumes (PVs) that are provisioned by {lvms}. +You can also create volume snapshots of the cloned volumes. Volume snapshots help you to do the following: + +* Back up your application data. ++ +[IMPORTANT] +==== +Volume snapshots are located on the same devices as the original data. To use the volume snapshots as backups, you need to move the snapshots to a secure location. You can use the OpenShift API for Data Protection (OADP) backup and restore solutions. +==== + +* Revert to a state at which the volume snapshot was taken. 
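For illustration only, a minimal `VolumeSnapshot` CR that requests a snapshot of an {lvms}-provisioned PVC might look like the following sketch. The PVC name `lvm-block-1`, the namespace `default`, and the snapshot class `lvms-vg1` are assumptions for this example rather than values defined in this patch; substitute the names that exist in your cluster.

[source,yaml]
----
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: lvm-block-1-snap            # illustrative snapshot name
  namespace: default                # must match the namespace of the source PVC
spec:
  volumeSnapshotClassName: lvms-vg1 # assumed class name; LVM Storage typically creates lvms-<device_class_name>
  source:
    persistentVolumeClaimName: lvm-block-1 # existing PVC provisioned by LVM Storage
----

After the snapshot reports `readyToUse: true` in its status, you can restore it by creating a new PVC that references the snapshot in its `spec.dataSource` field.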
\ No newline at end of file diff --git a/monitoring/configuring-the-monitoring-stack.adoc b/monitoring/configuring-the-monitoring-stack.adoc index acd3925024..d2f2d18f3c 100644 --- a/monitoring/configuring-the-monitoring-stack.adoc +++ b/monitoring/configuring-the-monitoring-stack.adoc @@ -91,7 +91,7 @@ include::modules/monitoring-modifying-the-retention-time-for-thanos-ruler-metric * xref:../scalability_and_performance/optimizing-storage.adoc#recommended-configurable-storage-technology_persistent-storage[Recommended configurable storage technology] * xref:../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage] * xref:../scalability_and_performance/optimizing-storage.adoc#optimizing-storage[Optimizing storage] -* xref:../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[Configure local persistent storage] +* xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Configure local persistent storage] * xref:../monitoring/enabling-monitoring-for-user-defined-projects.adoc#enabling-monitoring-for-user-defined-projects[Enabling monitoring for user-defined projects] // Configuring remote write storage for Prometheus diff --git a/post_installation_configuration/ibmz-post-install.adoc b/post_installation_configuration/ibmz-post-install.adoc index 2190b662d3..3839496ffb 100644 --- a/post_installation_configuration/ibmz-post-install.adoc +++ b/post_installation_configuration/ibmz-post-install.adoc @@ -29,7 +29,7 @@ include::modules/ibmz-configure-devices-mco.adoc[leveloffset=+1] .Next steps -* xref:../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[Install and configure the Local Storage Operator] +* xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Install and configure the Local Storage Operator] * xref:../networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc#k8s-nmstate-updating-node-network-config[Updating node network configuration] include::modules/ibmz-configure-devices-manually.adoc[leveloffset=+1] @@ -46,5 +46,5 @@ include::modules/ibmz-enable-multipathing-fcp-luns.adoc[leveloffset=+1] .Next steps -* xref:../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-using-local-volume[Install and configure the Local Storage Operator] +* xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-using-local-volume[Install and configure the Local Storage Operator] * xref:../networking/k8s_nmstate/k8s-nmstate-updating-node-network-config.adoc#k8s-nmstate-updating-node-network-config[Updating node network configuration] diff --git a/sandboxed_containers/sandboxed-containers-1.3-release-notes.adoc b/sandboxed_containers/sandboxed-containers-1.3-release-notes.adoc index fd559cea4d..3706c0f8ba 100644 --- a/sandboxed_containers/sandboxed-containers-1.3-release-notes.adoc +++ b/sandboxed_containers/sandboxed-containers-1.3-release-notes.adoc @@ -53,7 +53,7 @@ With this release, the SELinux policy is available when the monitor pod is creat + Following SELinux policy on the host guarantees full isolation of the host file system from the sandboxed workload by default. This also provides stronger protection against potential security flaws in the `virtiofsd` daemon or QEMU. 
+ -If the mounted files or directories do not have specific SELinux requirements on the host, you can use local persistent volumes as an alternative. Files are automatically relabeled to `container_file_t`, following SELinux policy for container runtimes. See xref:../storage/persistent_storage/persistent-storage-local.adoc#persistent-storage-local[Persistent storage using local volumes] for more information. +If the mounted files or directories do not have specific SELinux requirements on the host, you can use local persistent volumes as an alternative. Files are automatically relabeled to `container_file_t`, following SELinux policy for container runtimes. See xref:../storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc#persistent-storage-local[Persistent storage using local volumes] for more information. + Automatic relabeling is not an option when mounted files or directories are expected to have specific SELinux labels on the host. Instead, you can set custom SELinux rules on the host to allow the `virtiofsd` daemon to access these specific labels. (link:https://bugzilla.redhat.com/show_bug.cgi?id=1904609[*BZ#1904609*]) diff --git a/storage/persistent_storage/persistent_storage_local/_attributes b/storage/persistent_storage/persistent_storage_local/_attributes new file mode 120000 index 0000000000..20cc1dcb77 --- /dev/null +++ b/storage/persistent_storage/persistent_storage_local/_attributes @@ -0,0 +1 @@ +../../_attributes/ \ No newline at end of file diff --git a/storage/persistent_storage/persistent_storage_local/images b/storage/persistent_storage/persistent_storage_local/images new file mode 120000 index 0000000000..5e67573196 --- /dev/null +++ b/storage/persistent_storage/persistent_storage_local/images @@ -0,0 +1 @@ +../images \ No newline at end of file diff --git a/storage/persistent_storage/persistent_storage_local/modules b/storage/persistent_storage/persistent_storage_local/modules new file mode 120000 index 0000000000..36719b9de7 --- /dev/null +++ b/storage/persistent_storage/persistent_storage_local/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/storage/persistent_storage/persistent-storage-hostpath.adoc b/storage/persistent_storage/persistent_storage_local/persistent-storage-hostpath.adoc similarity index 100% rename from storage/persistent_storage/persistent-storage-hostpath.adoc rename to storage/persistent_storage/persistent_storage_local/persistent-storage-hostpath.adoc diff --git a/storage/persistent_storage/persistent-storage-local.adoc b/storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc similarity index 92% rename from storage/persistent_storage/persistent-storage-local.adoc rename to storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc index b8bd8012ee..378f0d33dd 100644 --- a/storage/persistent_storage/persistent-storage-local.adoc +++ b/storage/persistent_storage/persistent_storage_local/persistent-storage-local.adoc @@ -37,7 +37,7 @@ include::modules/persistent-storage-local-tolerations.adoc[leveloffset=+1] include::modules/persistent-storage-local-metrics.adoc[leveloffset=+1] -For more information about metrics, see xref:../../monitoring/managing-metrics.adoc#managing-metric[Managing metrics]. +For more information about metrics, see xref:../../../monitoring/managing-metrics.adoc#managing-metric[Managing metrics]. 
== Deleting the Local Storage Operator resources diff --git a/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc b/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc new file mode 100644 index 0000000000..8efad837e1 --- /dev/null +++ b/storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc @@ -0,0 +1,117 @@ +:_content-type: ASSEMBLY +[id="persistent-storage-using-lvms"] += Persistent storage using logical volume manager storage +include::_attributes/common-attributes.adoc[] +:context: logical-volume-manager-storage + +toc::[] + +{lvms-first} uses the TopoLVM CSI driver to dynamically provision local storage on {sno} clusters. + +{lvms} creates thin-provisioned volumes by using Logical Volume Manager and provides dynamic provisioning of block storage on {sno} clusters with limited resources. + +//deploying/requirements with RHACM +include::modules/deploying-lvms-on-sno-cluster.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html/install/installing#installing-while-connected-online[Red Hat Advanced Cluster Management for Kubernetes: Installing while connected online] + +//deploying/requirements with web console +include::modules/lvms-installing-logical-volume-manager-operator-using-openshift-web-console.adoc[leveloffset=+2] +include::modules/lvms-uninstalling-logical-volume-manager-operator-using-openshift-web-console.adoc[leveloffset=+2] + +include::modules/lvms-installing-logical-volume-manager-operator-using-rhacm.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html/install/installing#installing-while-connected-online[Red Hat Advanced Cluster Management for Kubernetes: Installing while connected online] + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-reference-file_logical-volume-manager-storage[{lvms} reference YAML file] + + +include::modules/lvms-uninstalling-logical-volume-manager-operator-using-rhacm.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-reference-file_logical-volume-manager-storage[{lvms} reference YAML file] + +include::modules/lvms-creating-an-logical-volume-manager-cluster.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../nodes/nodes/nodes-sno-worker-nodes.adoc[Adding worker nodes to {sno} clusters] 
+ +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-reference-file_logical-volume-manager-storage[{lvms} reference YAML file] + +//Provisioning +include::modules/lvms-provisioning-storage-using-logical-volume-manager-operator.adoc[leveloffset=+1] + +//Monitoring +include::modules/lvms-monitoring-logical-volume-manager-operator.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html-single/observability/index[Observability] + +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html-single/observability/index#adding-custom-metrics[Adding custom metrics] + +//Scaling +include::modules/lvms-scaling-storage-of-single-node-open-concept.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../nodes/nodes/nodes-sno-worker-nodes.adoc[Adding worker nodes to {sno} clusters] + +include::modules/lvms-scaling-storage-of-single-node-openshift-cluster.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-reference-file_logical-volume-manager-storage[{lvms} reference YAML file] + +include::modules/lvms-scaling-storage-of-single-node-openshift-cluster-using-rhacm.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources + +* link:https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html/install/installing#installing-while-connected-online[Red Hat Advanced Cluster Management for Kubernetes: Installing while connected online] + +* xref:../../../storage/persistent_storage/persistent_storage_local/persistent-storage-using-lvms.adoc#lvms-reference-file_logical-volume-manager-storage[{lvms} reference YAML file] + +//Upgrading +include::modules/lvms-upgrading-lvms-on-sno.adoc[leveloffset=+1] + +//Volume snapshots +include::modules/lvms-volume-snapshots-in-single-node-openshift.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../backup_and_restore/application_backup_and_restore/oadp-features-plugins.adoc#oadp-features_oadp-features-plugins[OADP features] + +include::modules/lvms-creating-volume-snapshots-in-single-node-openshift.adoc[leveloffset=+2] +include::modules/lvms-restoring-volume-snapshots-in-single-node-openshift.adoc[leveloffset=+2] +include::modules/lvms-deleting-volume-snapshots-in-single-node-openshift.adoc[leveloffset=+2] + +//Volume cloning +include::modules/lvms-volume-clones-in-single-node-openshift.adoc[leveloffset=+1] +include::modules/lvms-creating-volume-clones-in-single-node-openshift.adoc[leveloffset=+2] +include::modules/lvms-deleting-cloned-volumes-in-single-node-openshift.adoc[leveloffset=+2] + +//Must-gather +include::modules/lvms-download-log-files-and-diagnostics.adoc[leveloffset=+1] + +[role="_additional-resources"] +.Additional resources + +* xref:../../../support/gathering-cluster-data.adoc#about-must-gather_gathering-cluster-data[About the must-gather tool] + +//Reference +include::modules/lvms-reference-file.adoc[leveloffset=+1] \ No newline at end of file diff --git a/storage/persistent_storage/persistent_storage_local/snippets b/storage/persistent_storage/persistent_storage_local/snippets new file mode 120000 index 0000000000..5a3f5add14 --- 
/dev/null +++ b/storage/persistent_storage/persistent_storage_local/snippets @@ -0,0 +1 @@ +../../snippets/ \ No newline at end of file