From 99301c300b24af6e6453ebe166b021c1ec245837 Mon Sep 17 00:00:00 2001
From: Ilya Buziuk
Date: Tue, 26 Apr 2022 09:39:47 +0200
Subject: [PATCH] docs: updating minikube and crc installation guides (#2302)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Kevin Owen
Co-authored-by: Fabrice Flore-Thébault
---
 .../pages/installing-che-on-minikube.adoc     |   2 -
 ...o-install-che-on-codeready-containers.adoc |  39 ++-----
 .../partials/proc_using-minikube-and-lxc.adoc | 107 ------------------
 ...c_using-minikube-to-set-up-kubernetes.adoc |   4 +-
 4 files changed, 11 insertions(+), 141 deletions(-)
 delete mode 100644 modules/administration-guide/partials/proc_using-minikube-and-lxc.adoc

diff --git a/modules/administration-guide/pages/installing-che-on-minikube.adoc b/modules/administration-guide/pages/installing-che-on-minikube.adoc
index 5bc13d4e8b..26cefdd1fd 100644
--- a/modules/administration-guide/pages/installing-che-on-minikube.adoc
+++ b/modules/administration-guide/pages/installing-che-on-minikube.adoc
@@ -18,6 +18,4 @@ include::partial$proc_using-minikube-to-set-up-kubernetes.adoc[leveloffset=+1]
 
 include::partial$proc_installing-che-on-minikube-using-chectl.adoc[leveloffset=+1]
 
-include::partial$proc_using-minikube-and-lxc.adoc[leveloffset=+1]
-
 :context: {parent-context-of-installing-che-on-minikube}
diff --git a/modules/administration-guide/partials/proc_using-chectl-to-install-che-on-codeready-containers.adoc b/modules/administration-guide/partials/proc_using-chectl-to-install-che-on-codeready-containers.adoc
index 3f00c411b4..ae1183a3dc 100644
--- a/modules/administration-guide/partials/proc_using-chectl-to-install-che-on-codeready-containers.adoc
+++ b/modules/administration-guide/partials/proc_using-chectl-to-install-che-on-codeready-containers.adoc
@@ -5,51 +5,39 @@
 [id="using-{prod-cli}-to-install-{prod-id-short}-on-codeready-containers_{context}"]
 = Using {prod-cli} to install {prod-short} on CodeReady Containers
 
-This section describes how to install {prod-short} on CodeReady Containers (CRC) using `{prod-cli}`.
+This section describes how to install {prod-short} on CodeReady Containers using `{prod-cli}`.
 
 .Prerequisites
 
 * The `{prod-cli}` management tool is available. See xref:using-the-chectl-management-tool.adoc[].
 * An installation of CodeReady Containers. See link:https://console.redhat.com/openshift/create/local[Installing CodeReady Containers].
-* The path to the user's CRC pull secret is known.
+* The path to the user's CodeReady Containers pull secret is known as ____.
 
 .Procedure
 
-. Initiate the CRC platform:
-
-.. Configure your host machine for CodeReady Containers:
-+
-----
-$ crc setup
-----
-
-.. Remove any previous cluster:
+. Remove any previous cluster:
 +
 ----
 $ crc delete
 ----
 
-.. Initiate the `oc` command line interface within CRC:
+. Configure your host machine for CodeReady Containers:
 +
 ----
-$ eval $(crc oc-env)
+$ crc setup
 ----
 
-.. Start the CodeReady Containers virtual machine with at least 12 GB of RAM and specify the path to your pull secret:
+. Start the CodeReady Containers virtual machine with at least 12 GB of RAM, specify the path to your pull secret, and take note of the password for the user `kubeadmin`, displayed at the end of the CodeReady Containers initiation:
 +
 [subs="+quotes"]
 ----
-$ crc start --memory 12288 --pull-secret-file ____pull-secret.txt
+$ crc start --memory 12288 --pull-secret-file ____
 ----
 
-.. Take note of the password for the user `kubeadmin`, displayed at the end of the CRC initiation.
-
-. Log into the OpenShift cluster using the `kubeadmin` username, the corresponding `____` password, and the CRC URL pointer displayed at the end of the CRC initiation phase:
+. Enable access to the `oc` command line interface embedded in CodeReady Containers:
 +
-[subs="+quotes"]
 ----
-$ oc login --username="kubeadmin" --password="____" https://api.crc.testing:6443
-
+$ eval $(crc oc-env)
 ----
 
 . Install the {prod-short} instance using {prod-cli}:
 +
 [subs="+quotes"]
 ----
@@ -59,15 +47,6 @@
 $ {prod-cli} server:deploy --platform crc
 ----
 
-. Use the Users Dashboard CRC testing URL and sign in as:
-+
-----
-Login: developer
-Password: developer
-----
-
-. Enter the user's credentials and start using {prod-short}.
-
 .Additional resources
 
diff --git a/modules/administration-guide/partials/proc_using-minikube-and-lxc.adoc b/modules/administration-guide/partials/proc_using-minikube-and-lxc.adoc
deleted file mode 100644
index 39d5db91ec..0000000000
--- a/modules/administration-guide/partials/proc_using-minikube-and-lxc.adoc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Module included in the following assemblies:
-//
-// installing-{prod-id-short}-on-minikube
-
-[id="using-minikube-with-lxc_{context}"]
-= Running Minikube inside an LXC container
-
-This section describes how to properly configure an LXC container for Minikube when the hypervisor uses ZFS, Btrfs, or LVM to provision the containers storage.
-
-[NOTE]
-====
-.Background
-
-The `{prod-cli}` command-line tool requires the Minikube Ingress plug-in to be enabled in Minikube. At the same time, the Minikube Ingress plug-in requires the Docker daemon to be running with the overlay filesystem driver.
-
-.Problem
-
-According to link:https://docs.docker.com/storage/storagedriver/select-storage-driver/[Docker storage drivers], the Docker overlay2 driver is only supported with the Ext4 and XFS file systems (with `ftype=1`).
-
-.Solution
-
-create a virtual block device inside a volume, which in the case of Btrfs is impossible and requires to use a file as the virtual block device.
-====
-
-.Procedure
-
-In the following instructions, change the `zfsPool` or LVM `volume_group` name and `dockerstorage` according to your use case and preferences.
-
-. Create a fixed size ZFS dataset or LVM volume on the hypervisor side:
-+
-----
-$ zfs create -V 50G zfsPool/dockerstorage #USING ZFS
-$ lvcreate -L 50G -n dockerstorage volumegroup_name #USING LVM
-----
-
-. Use a partition tool to create a partition inside the virtual block device:
-+
-----
-$ parted /dev/zvol/zfsPool/dockerstorage --script mklabel gpt #USING ZFS
-$ parted /dev/zvol/zfsPool/dockerstorage --script mkpart primary 1 100% #USING ZFS
-$ parted /dev/mapper/volumegroup_name-dockerstorage --script mklabel gpt #USING LVM
-$ parted /dev/mapper/volumegroup_name-dockerstorage --script mkpart primary 1 100% #USING LVM
-----
-+
-Observe references called:
-+
-* For ZFS: `dockerstorage-part1` inside the `/dev/zvol/zfsPool` directory
-* For LVM: `volumegroup_name-dockerstorage1` inside the `/dev/mapper` directory
-+
-This is the partition of the virtual block device to be used to store `/var/lib/docker` from the LXC container.
-
-. Format the virtual partition to XFS with the `ftype` flag set to `1`:
-+
-----
-$ mkfs.xfs -n ftype=1 /dev/zvol/zfsPool/dockerstorage-part1 #FOR ZFS
-$ mkfs.xfs -n ftype=1 /dev/mapper/volumegroup_name-dockerstorage1 #FOR LVM
-----
-
-. Attach the virtual partition to the container (`minikube` is the name of the LXC container, `dockerstorage` is the name for the storage instance in LXC configuration):
-+
-----
-$ lxc config device add minikube dockerstorage disk path=/var/lib/docker \
- source=/dev/zvol/zfsPool/dockerstorage-part1 #FOR ZFS
-$ lxc config device add minikube dockerstorage disk path=/var/lib/docker \
- source=/dev/mapper/volumegroup_name-dockerstorage1 #FOR LVM
-----
-+
-Check the filesystem inside the container using the `df` command:
-+
-----
-$ df -T /var/lib/docker
-----
-
-. Use the following LXC configuration profile in the LXC container to allow it to run Minikube:
-+
-----
-config:
-  linux.kernel_modules: ip_vs,ip_vs_rr,ip_vs_wrr,ip_vs_sh,ip_tables,ip6_tables,netlink_diag,nf_nat,overlay,br_netfilter
-  raw.lxc: |
-    lxc.apparmor.profile=unconfined
-    lxc.mount.auto=proc:rw sys:rw
-    lxc.cgroup.devices.allow=a
-    lxc.cap.drop=
-  security.nesting: "true"
-  security.privileged: "true"
-description: Profile supporting minikube in containers
-devices:
-  aadisable:
-    path: /sys/module/apparmor/parameters/enabled
-    source: /dev/null
-    type: disk
-  aadisable2:
-    path: /sys/module/nf_conntrack/parameters/hashsize
-    source: /sys/module/nf_conntrack/parameters/hashsize
-    type: disk
-  aadisable3:
-    path: /dev/kmsg
-    source: /dev/kmsg
-    type: disk
-name: minikube
-----
-
-. After starting and setting up networking and the Docker service inside the container, start Minikube:
-+
-----
-$ minikube start --vm-driver=none --extra-config kubeadm.ignore-preflight-errors=SystemVerification
-----
diff --git a/modules/administration-guide/partials/proc_using-minikube-to-set-up-kubernetes.adoc b/modules/administration-guide/partials/proc_using-minikube-to-set-up-kubernetes.adoc
index 504e5ff349..662caad428 100644
--- a/modules/administration-guide/partials/proc_using-minikube-to-set-up-kubernetes.adoc
+++ b/modules/administration-guide/partials/proc_using-minikube-to-set-up-kubernetes.adoc
@@ -14,8 +14,8 @@ This section describes how to use Minikube to prepare a local single-node {kuber
 
 .Procedure
 
-. Start Minikube (it is important to *allocate at least 4GB of RAM but 8GB are recommended*):
+. Start Minikube. Allocate at least 8GB of RAM and 4 CPU cores:
 +
 ----
-$ minikube start --addons=ingress --vm=true --memory=8192
+$ minikube start --addons=ingress --vm=true --memory=8192 --cpus=4
 ----
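
For quick reference, the commands documented by the two updated install paths can be strung together as follows. This is only an illustrative sketch assembled from the hunks above: it assumes `chectl` stands for `{prod-cli}`, uses `<path-to-pull-secret>` as a stand-in for the pull-secret placeholder left elided above, and assumes `--platform minikube` (from the companion minikube install module, which this patch does not show) as the matching deploy target.

----
# CodeReady Containers flow, as documented in the updated CRC module
$ crc delete
$ crc setup
$ crc start --memory 12288 --pull-secret-file <path-to-pull-secret>  # stand-in path
$ eval $(crc oc-env)
$ chectl server:deploy --platform crc

# Minikube flow, as documented in the updated Minikube module
$ minikube start --addons=ingress --vm=true --memory=8192 --cpus=4
$ chectl server:deploy --platform minikube  # assumed deploy target; not shown in this patch
----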