===================== # Set up the local environment using the normal procedure for the site and then reboot. # In this example puppet is used. The name of the server is iut2-slate01.iu.edu # iut2-mon02 2:37PM /home/luehring % ssh root@iut2-slate01.iu.edu root@iut2-slate01's password: Last failed login: Tue Aug 3 10:51:28 CDT 2021 from 104.244.74.89 on ssh:notty There were 1853 failed login attempts since the last successful login. [root@iut2-slate01 ~]# puppet agent --test --server=puppet.grid.uchicago.edu --ca_server=puppet-ca.grid.uchicago.edu Info: Downloaded certificate for ca from puppet-ca.grid.uchicago.edu Info: Downloaded certificate revocation list for ca from puppet-ca.grid.uchicago.edu Info: Creating a new RSA SSL key for iut2-slate01.iu.edu [Output from puppet removed.] [Each site will do the local setup differently and have different output.] [root@iut2-slate01 ~]# reboot Connection to iut2-slate01 closed by remote host. Connection to iut2-slate01 closed. iut2-mon02 3:08PM /home/luehring % iut2-mon02 3:30PM /home/luehring % ssh root@iut2-slate01 root@iut2-slate01's password: Last failed login: Tue Aug 3 10:51:28 CDT 2021 from 104.244.74.89 on ssh:notty ===================== # Create the file slate-token.sh and use it to install the token that slate will use. # Each site must use their own token in creating this file. # [root@iut2-slate01 ~]# vi slate-token.sh [Inserted this text into slate-token.sh] #!/bin/sh mkdir -p -m 0700 "$HOME/.slate" if [ "$?" -ne 0 ] ; then echo "Not able to create $HOME/.slate" 1>&2 exit 1 fi echo "c13ZtUaODDEkelKrbvlzCs" > "$HOME/.slate/token" if [ "$?" 
-ne 0 ] ; then echo "Not able to write token data to $HOME/.slate/token" 1>&2 exit 1 fi chmod 600 "$HOME/.slate/token" echo 'https://api.slateci.io:443' > ~/.slate/endpoint echo "SLATE access token successfully stored" [And saved slate-token.sh] "slate-token.sh" [New] 17L, 410C written [root@iut2-slate01 ~]# chmod 755 slate-token.sh [root@iut2-slate01 ~]# ./slate-token.sh SLATE access token successfully stored ===================== # Install the SLATE executable. # [root@iut2-slate01 ~]# curl -LO https://jenkins.slateci.io/artifacts/client/slate-linux.sha256 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 85 100 85 0 0 292 0 --:--:-- --:--:-- --:--:-- 293 [root@iut2-slate01 ~]# curl -LO https://jenkins.slateci.io/artifacts/client/slate-linux.tar.gz % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 1896k 100 1896k 0 0 6585k 0 --:--:-- --:--:-- --:--:-- 6607k [root@iut2-slate01 ~]# ls -ltr total 1968 [Lines removed] -rwxr-xr-x 1 root root 410 Aug 4 10:35 slate-token.sh -rw-r--r-- 1 root root 85 Aug 4 10:36 slate-linux.sha256 -rw-r--r-- 1 root root 1941755 Aug 4 10:37 slate-linux.tar.gz [root@iut2-slate01 ~]# sha256sum -c slate-linux.sha256 slate-linux.tar.gz: OK [root@iut2-slate01 ~]# tar xzvf slate-linux.tar.gz slate [root@iut2-slate01 ~]# ls -ltr total 5992 [Lines removed] -rwxr-xr-x 1 1000 1000 4123632 Jul 30 14:37 slate -rwxr-xr-x 1 root root 410 Aug 4 10:35 slate-token.sh -rw-r--r-- 1 root root 85 Aug 4 10:36 slate-linux.sha256 -rw-r--r-- 1 root root 1941755 Aug 4 10:37 slate-linux.tar.gz [root@iut2-slate01 ~]# mv slate /usr/local/bin/slate ===================== # Check the executable by listing the existing SLATE clusters. # NB: You must supply the full path to the slate executable if /usr/local/bin is not in the default path like it is here. 
[root@iut2-slate01 ~]# slate cluster list Name Admin ID Rice-CRC-OCI rice-crc cluster_wRzlo7q62VM atlas-af-proto mwt2 cluster_CwuDuKE43GA check-mk-test cmk-um cluster_QJaTB3nj5a8 chtc-tiger chtc-osg cluster_BhJeF1PCuro clemson-aci clemson-aci cluster_btEVWqfo5R4 frontera tacc-admin cluster_xyOjLBH8YFg gpn-beocat gpn-poc cluster_YRl0CqESiJU gpn-poc-onenet gpn-poc cluster_HuMFzohYwDA gpn-poc-test gpn-poc cluster_jx9sSpVedOI iu-osiris osiris cluster_biECfbqdj9g mwt2-iu mwt2 cluster_WMaDQIPYI2s mwt2-uiuc mwt2 cluster_P1UMU2tRc8A net2 net2 cluster_Z-gQFi1-O1Y nmsu nmsu cluster_u5P6TmKkOU0 notredame ndcms cluster_nt2Exr5snOI osg-gatech-dev gatech-dev cluster_W_LeACXrMC8 prague-xcache atlas-xcache cluster_hTUJr76Nvg0 spt-npx spt cluster_z0Ui4YRQ6Uk swt2-cpb swt2-cpb cluster_u9iI0_EYe3o t3colorado t3colorado cluster_xYxCGxfGmMI uchicago-af atlas-af-ops cluster_pK-rwKJ9gr8 uchicago-prod slate-dev cluster_yZroQR5mfBk uchicago-river-v2 ssl cluster_iL8D7abxCM8 uchicago-singularity slate-dev cluster_3Mm9JisxCGc umich-prod slate-dev cluster_WRb0f8mH9ak uutah-prod slate-dev cluster_omwbRNbKRKE ===================== # The Windows 95 step: turn off selinux and the firewall. # [root@iut2-slate01 ~]# setenforce 0 setenforce: SELinux is disabled [root@iut2-slate01 ~]# sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux [root@iut2-slate01 ~]# swapoff -a [root@iut2-slate01 ~]# sed -e '/swap/s/^/#/g' -i /etc/fstab [root@iut2-slate01 ~]# systemctl disable --now firewalld [root@iut2-slate01 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf > net.bridge.bridge-nf-call-ip6tables = 1 > net.bridge.bridge-nf-call-iptables = 1 > EOF [root@iut2-slate01 ~]# sysctl --system * Applying /usr/lib/sysctl.d/00-system.conf ... * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ... kernel.yama.ptrace_scope = 0 * Applying /usr/lib/sysctl.d/50-default.conf ... 
kernel.sysrq = 16 kernel.core_uses_pid = 1 kernel.kptr_restrict = 1 net.ipv4.conf.default.rp_filter = 1 net.ipv4.conf.all.rp_filter = 1 net.ipv4.conf.default.accept_source_route = 0 net.ipv4.conf.all.accept_source_route = 0 net.ipv4.conf.default.promote_secondaries = 1 net.ipv4.conf.all.promote_secondaries = 1 fs.protected_hardlinks = 1 fs.protected_symlinks = 1 * Applying /etc/sysctl.d/99-sysctl.conf ... * Applying /etc/sysctl.d/k8s.conf ... * Applying /etc/sysctl.conf ... ===================== # Install the packages needed to create a cluster. # [root@iut2-slate01 ~]# # Add the yum-config-manager tool if you don't already have it [root@iut2-slate01 ~]# yum install yum-utils -y Resolving Dependencies --> Running transaction check ---> Package yum-utils.noarch 0:1.1.31-54.el7_8 will be installed --> Processing Dependency: libxml2-python for package: yum-utils-1.1.31-54.el7_8.noarch --> Processing Dependency: python-kitchen for package: yum-utils-1.1.31-54.el7_8.noarch --> Running transaction check ---> Package libxml2-python.x86_64 0:2.9.1-6.el7.5 will be installed ---> Package python-kitchen.noarch 0:1.1.1-5.el7 will be installed --> Processing Dependency: python-chardet for package: python-kitchen-1.1.1-5.el7.noarch --> Running transaction check ---> Package python-chardet.noarch 0:2.2.1-3.el7 will be installed --> Finished Dependency Resolution Dependencies Resolved ==================================================================================================================================== Package Arch Version Repository Size ==================================================================================================================================== Installing: yum-utils noarch 1.1.31-54.el7_8 sl 115 k Installing for dependencies: libxml2-python x86_64 2.9.1-6.el7.5 sl 227 k python-chardet noarch 2.2.1-3.el7 sl 197 k python-kitchen noarch 1.1.1-5.el7 sl 265 k Transaction Summary 
==================================================================================================================================== Install 1 Package (+3 Dependent packages) Total download size: 803 k Installed size: 4.2 M Downloading packages: warning: /var/cache/yum/x86_64/7/sl/packages/libxml2-python-2.9.1-6.el7.5.x86_64.rpm: Header V4 DSA/SHA1 Signature, key ID 192a7d7d: NOKEY Public key for libxml2-python-2.9.1-6.el7.5.x86_64.rpm is not installed (1/4): libxml2-python-2.9.1-6.el7.5.x86_64.rpm | 227 kB 00:00:00 (2/4): yum-utils-1.1.31-54.el7_8.noarch.rpm | 115 kB 00:00:00 (3/4): python-chardet-2.2.1-3.el7.noarch.rpm | 197 kB 00:00:00 (4/4): python-kitchen-1.1.1-5.el7.noarch.rpm | 265 kB 00:00:00 ------------------------------------------------------------------------------------------------------------------------------------ Total 2.3 MB/s | 803 kB 00:00:00 Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-sl Importing GPG key 0x192A7D7D: Userid : "Scientific Linux (RPM signing key for Scientific Linux) " Fingerprint: 40d4 223e 3673 3c2e 1b24 e755 b0b4 183f 192a 7d7d Package : sl-release-7.9-1.sl7.x86_64 (@anaconda/7.9) From : /etc/pki/rpm-gpg/RPM-GPG-KEY-sl Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-sl7 Importing GPG key 0x1AA2F65C: Userid : "Scientific Linux (RPM signing key for Scientific Linux 7) " Fingerprint: ecd5 c55b bd6d 1b59 d080 4fd4 8333 0ded 1aa2 f65c Package : sl-release-7.9-1.sl7.x86_64 (@anaconda/7.9) From : /etc/pki/rpm-gpg/RPM-GPG-KEY-sl7 Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : python-chardet-2.2.1-3.el7.noarch 1/4 Installing : python-kitchen-1.1.1-5.el7.noarch 2/4 Installing : libxml2-python-2.9.1-6.el7.5.x86_64 3/4 Installing : yum-utils-1.1.31-54.el7_8.noarch 4/4 Verifying : libxml2-python-2.9.1-6.el7.5.x86_64 1/4 Verifying : python-kitchen-1.1.1-5.el7.noarch 2/4 Verifying : yum-utils-1.1.31-54.el7_8.noarch 3/4 Verifying : 
python-chardet-2.2.1-3.el7.noarch 4/4 Installed: yum-utils.noarch 0:1.1.31-54.el7_8 Dependency Installed: libxml2-python.x86_64 0:2.9.1-6.el7.5 python-chardet.noarch 0:2.2.1-3.el7 python-kitchen.noarch 0:1.1.1-5.el7 Complete! [root@iut2-slate01 ~]# [root@iut2-slate01 ~]# # Add Docker stable repo to Yum [root@iut2-slate01 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo adding repo from: https://download.docker.com/linux/centos/docker-ce.repo grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo repo saved to /etc/yum.repos.d/docker-ce.repo [root@iut2-slate01 ~]# [root@iut2-slate01 ~]# # Install the latest version of DockerCE and containerd [root@iut2-slate01 ~]# yum install docker-ce docker-ce-cli containerd.io -y docker-ce-stable | 3.5 kB 00:00:00 (1/2): docker-ce-stable/7/x86_64/primary_db | 63 kB 00:00:00 (2/2): docker-ce-stable/7/x86_64/updateinfo | 55 B 00:00:00 Resolving Dependencies --> Running transaction check ---> Package containerd.io.x86_64 0:1.4.9-3.1.el7 will be installed --> Processing Dependency: container-selinux >= 2:2.74 for package: containerd.io-1.4.9-3.1.el7.x86_64 ---> Package docker-ce.x86_64 3:20.10.8-3.el7 will be installed --> Processing Dependency: docker-ce-rootless-extras for package: 3:docker-ce-20.10.8-3.el7.x86_64 --> Processing Dependency: libcgroup for package: 3:docker-ce-20.10.8-3.el7.x86_64 ---> Package docker-ce-cli.x86_64 1:20.10.8-3.el7 will be installed --> Processing Dependency: docker-scan-plugin(x86-64) for package: 1:docker-ce-cli-20.10.8-3.el7.x86_64 --> Running transaction check ---> Package container-selinux.noarch 2:2.119.2-1.911c772.el7_8 will be installed --> Processing Dependency: policycoreutils-python for package: 2:container-selinux-2.119.2-1.911c772.el7_8.noarch ---> Package docker-ce-rootless-extras.x86_64 0:20.10.8-3.el7 will be installed --> Processing Dependency: fuse-overlayfs >= 0.7 for package: 
docker-ce-rootless-extras-20.10.8-3.el7.x86_64 --> Processing Dependency: slirp4netns >= 0.4 for package: docker-ce-rootless-extras-20.10.8-3.el7.x86_64 ---> Package docker-scan-plugin.x86_64 0:0.8.0-3.el7 will be installed ---> Package libcgroup.x86_64 0:0.41-21.el7 will be installed --> Running transaction check ---> Package fuse-overlayfs.x86_64 0:0.7.2-6.el7_8 will be installed --> Processing Dependency: libfuse3.so.3(FUSE_3.0)(64bit) for package: fuse-overlayfs-0.7.2-6.el7_8.x86_64 --> Processing Dependency: libfuse3.so.3(FUSE_3.2)(64bit) for package: fuse-overlayfs-0.7.2-6.el7_8.x86_64 --> Processing Dependency: libfuse3.so.3()(64bit) for package: fuse-overlayfs-0.7.2-6.el7_8.x86_64 ---> Package policycoreutils-python.x86_64 0:2.5-34.el7 will be installed --> Processing Dependency: audit-libs-python >= 2.1.3-4 for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libsemanage-python >= 2.5-14 for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: setools-libs >= 3.3.8-4 for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: checkpolicy for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libapol.so.4(VERS_4.0)(64bit) for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libqpol.so.1(VERS_1.2)(64bit) for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libqpol.so.1(VERS_1.4)(64bit) for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: python-IPy for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libapol.so.4()(64bit) for package: policycoreutils-python-2.5-34.el7.x86_64 --> Processing Dependency: libqpol.so.1()(64bit) for package: policycoreutils-python-2.5-34.el7.x86_64 ---> Package slirp4netns.x86_64 0:0.4.3-4.el7_8 will be installed --> Running transaction check ---> Package audit-libs-python.x86_64 0:2.8.5-4.el7 will be installed ---> Package 
checkpolicy.x86_64 0:2.5-8.el7 will be installed ---> Package fuse3-libs.x86_64 0:3.6.1-4.el7 will be installed ---> Package libsemanage-python.x86_64 0:2.5-14.el7 will be installed ---> Package python-IPy.noarch 0:0.75-6.el7 will be installed ---> Package setools-libs.x86_64 0:3.3.8-4.el7 will be installed --> Finished Dependency Resolution Dependencies Resolved ==================================================================================================================================== Package Arch Version Repository Size ==================================================================================================================================== Installing: containerd.io x86_64 1.4.9-3.1.el7 docker-ce-stable 30 M docker-ce x86_64 3:20.10.8-3.el7 docker-ce-stable 23 M docker-ce-cli x86_64 1:20.10.8-3.el7 docker-ce-stable 29 M Installing for dependencies: audit-libs-python x86_64 2.8.5-4.el7 sl 71 k checkpolicy x86_64 2.5-8.el7 sl 281 k container-selinux noarch 2:2.119.2-1.911c772.el7_8 sl-extras 39 k docker-ce-rootless-extras x86_64 20.10.8-3.el7 docker-ce-stable 8.0 M docker-scan-plugin x86_64 0.8.0-3.el7 docker-ce-stable 4.2 M fuse-overlayfs x86_64 0.7.2-6.el7_8 sl-extras 52 k fuse3-libs x86_64 3.6.1-4.el7 sl-extras 78 k libcgroup x86_64 0.41-21.el7 sl 63 k libsemanage-python x86_64 2.5-14.el7 sl 109 k policycoreutils-python x86_64 2.5-34.el7 sl 431 k python-IPy noarch 0.75-6.el7 sl 31 k setools-libs x86_64 3.3.8-4.el7 sl 579 k slirp4netns x86_64 0.4.3-4.el7_8 sl-extras 78 k Transaction Summary ==================================================================================================================================== Install 3 Packages (+13 Dependent packages) Total download size: 96 M Installed size: 386 M Downloading packages: (1/16): audit-libs-python-2.8.5-4.el7.x86_64.rpm | 71 kB 00:00:00 (2/16): checkpolicy-2.5-8.el7.x86_64.rpm | 281 kB 00:00:00 (3/16): container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm | 39 kB 00:00:00 warning: 
/var/cache/yum/x86_64/7/docker-ce-stable/packages/docker-ce-20.10.8-3.el7.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 621e9f35: NOKEY Public key for docker-ce-20.10.8-3.el7.x86_64.rpm is not installed (4/16): docker-ce-20.10.8-3.el7.x86_64.rpm | 23 MB 00:00:01 (5/16): containerd.io-1.4.9-3.1.el7.x86_64.rpm | 30 MB 00:00:01 (6/16): docker-ce-rootless-extras-20.10.8-3.el7.x86_64.rpm | 8.0 MB 00:00:00 (7/16): fuse3-libs-3.6.1-4.el7.x86_64.rpm | 78 kB 00:00:00 (8/16): fuse-overlayfs-0.7.2-6.el7_8.x86_64.rpm | 52 kB 00:00:00 (9/16): libsemanage-python-2.5-14.el7.x86_64.rpm | 109 kB 00:00:00 (10/16): python-IPy-0.75-6.el7.noarch.rpm | 31 kB 00:00:00 (11/16): setools-libs-3.3.8-4.el7.x86_64.rpm | 579 kB 00:00:00 (12/16): libcgroup-0.41-21.el7.x86_64.rpm | 63 kB 00:00:00 (13/16): docker-scan-plugin-0.8.0-3.el7.x86_64.rpm | 4.2 MB 00:00:00 (14/16): slirp4netns-0.4.3-4.el7_8.x86_64.rpm | 78 kB 00:00:00 (15/16): policycoreutils-python-2.5-34.el7.x86_64.rpm | 431 kB 00:00:00 (16/16): docker-ce-cli-20.10.8-3.el7.x86_64.rpm | 29 MB 00:00:00 ------------------------------------------------------------------------------------------------------------------------------------ Total 48 MB/s | 96 MB 00:00:01 Retrieving key from https://download.docker.com/linux/centos/gpg Importing GPG key 0x621E9F35: Userid : "Docker Release (CE rpm) " Fingerprint: 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35 From : https://download.docker.com/linux/centos/gpg Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : libcgroup-0.41-21.el7.x86_64 1/16 Installing : 1:docker-ce-cli-20.10.8-3.el7.x86_64 2/16 Installing : docker-scan-plugin-0.8.0-3.el7.x86_64 3/16 Installing : setools-libs-3.3.8-4.el7.x86_64 4/16 Installing : slirp4netns-0.4.3-4.el7_8.x86_64 5/16 Installing : libsemanage-python-2.5-14.el7.x86_64 6/16 Installing : python-IPy-0.75-6.el7.noarch 7/16 Installing : audit-libs-python-2.8.5-4.el7.x86_64 8/16 Installing : 
fuse3-libs-3.6.1-4.el7.x86_64 9/16 Installing : fuse-overlayfs-0.7.2-6.el7_8.x86_64 10/16 Installing : checkpolicy-2.5-8.el7.x86_64 11/16 Installing : policycoreutils-python-2.5-34.el7.x86_64 12/16 Installing : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 13/16 setsebool: SELinux is disabled. Installing : containerd.io-1.4.9-3.1.el7.x86_64 14/16 Installing : docker-ce-rootless-extras-20.10.8-3.el7.x86_64 15/16 Installing : 3:docker-ce-20.10.8-3.el7.x86_64 16/16 Verifying : checkpolicy-2.5-8.el7.x86_64 1/16 Verifying : fuse3-libs-3.6.1-4.el7.x86_64 2/16 Verifying : docker-ce-rootless-extras-20.10.8-3.el7.x86_64 3/16 Verifying : audit-libs-python-2.8.5-4.el7.x86_64 4/16 Verifying : python-IPy-0.75-6.el7.noarch 5/16 Verifying : docker-scan-plugin-0.8.0-3.el7.x86_64 6/16 Verifying : libsemanage-python-2.5-14.el7.x86_64 7/16 Verifying : slirp4netns-0.4.3-4.el7_8.x86_64 8/16 Verifying : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 9/16 Verifying : containerd.io-1.4.9-3.1.el7.x86_64 10/16 Verifying : policycoreutils-python-2.5-34.el7.x86_64 11/16 Verifying : 1:docker-ce-cli-20.10.8-3.el7.x86_64 12/16 Verifying : 3:docker-ce-20.10.8-3.el7.x86_64 13/16 Verifying : setools-libs-3.3.8-4.el7.x86_64 14/16 Verifying : fuse-overlayfs-0.7.2-6.el7_8.x86_64 15/16 Verifying : libcgroup-0.41-21.el7.x86_64 16/16 Installed: containerd.io.x86_64 0:1.4.9-3.1.el7 docker-ce.x86_64 3:20.10.8-3.el7 docker-ce-cli.x86_64 1:20.10.8-3.el7 Dependency Installed: audit-libs-python.x86_64 0:2.8.5-4.el7 checkpolicy.x86_64 0:2.5-8.el7 container-selinux.noarch 2:2.119.2-1.911c772.el7_8 docker-ce-rootless-extras.x86_64 0:20.10.8-3.el7 docker-scan-plugin.x86_64 0:0.8.0-3.el7 fuse-overlayfs.x86_64 0:0.7.2-6.el7_8 fuse3-libs.x86_64 0:3.6.1-4.el7 libcgroup.x86_64 0:0.41-21.el7 libsemanage-python.x86_64 0:2.5-14.el7 policycoreutils-python.x86_64 0:2.5-34.el7 python-IPy.noarch 0:0.75-6.el7 setools-libs.x86_64 0:3.3.8-4.el7 slirp4netns.x86_64 0:0.4.3-4.el7_8 Complete! 
===================== [root@iut2-slate01 ~]# # Enable Docker on reboot through systemctl [root@iut2-slate01 ~]# systemctl enable --now docker Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service. [root@iut2-slate01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo > [kubernetes] > name=Kubernetes > baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 > enabled=1 > gpgcheck=1 > repo_gpgcheck=1 > gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg > EOF ===================== # Continue installing necessary packages [root@iut2-slate01 ~]# # Install the three necessary Kubernetes components [root@iut2-slate01 ~]# yum install -y kubeadm kubectl kubelet --disableexcludes=kubernetes kubernetes/signature | 844 B 00:00:00 Retrieving key from https://packages.cloud.google.com/yum/doc/yum-key.gpg Importing GPG key 0x307EA071: Userid : "Rapture Automatic Signing Key (cloud-rapture-signing-key-2021-03-01-08_01_09.pub)" Fingerprint: 7f92 e05b 3109 3bef 5a3c 2d38 feea 9169 307e a071 From : https://packages.cloud.google.com/yum/doc/yum-key.gpg Retrieving key from https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg kubernetes/signature | 1.4 kB 00:00:00 !!! 
kubernetes/primary | 93 kB 00:00:00 kubernetes 687/687 Resolving Dependencies --> Running transaction check ---> Package kubeadm.x86_64 0:1.21.3-0 will be installed --> Processing Dependency: kubernetes-cni >= 0.8.6 for package: kubeadm-1.21.3-0.x86_64 --> Processing Dependency: cri-tools >= 1.13.0 for package: kubeadm-1.21.3-0.x86_64 ---> Package kubectl.x86_64 0:1.21.3-0 will be installed ---> Package kubelet.x86_64 0:1.21.3-0 will be installed --> Processing Dependency: socat for package: kubelet-1.21.3-0.x86_64 --> Processing Dependency: conntrack for package: kubelet-1.21.3-0.x86_64 --> Running transaction check ---> Package conntrack-tools.x86_64 0:1.4.4-7.el7 will be installed --> Processing Dependency: libnetfilter_cthelper.so.0(LIBNETFILTER_CTHELPER_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 --> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 --> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.1)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 --> Processing Dependency: libnetfilter_cthelper.so.0()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 --> Processing Dependency: libnetfilter_cttimeout.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 --> Processing Dependency: libnetfilter_queue.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64 ---> Package cri-tools.x86_64 0:1.13.0-0 will be installed ---> Package kubernetes-cni.x86_64 0:0.8.7-0 will be installed ---> Package socat.x86_64 0:1.7.3.2-2.el7 will be installed --> Running transaction check ---> Package libnetfilter_cthelper.x86_64 0:1.0.0-11.el7 will be installed ---> Package libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 will be installed ---> Package libnetfilter_queue.x86_64 0:1.0.2-2.el7_2 will be installed --> Finished Dependency Resolution Dependencies Resolved 
==================================================================================================================================== Package Arch Version Repository Size ==================================================================================================================================== Installing: kubeadm x86_64 1.21.3-0 kubernetes 9.1 M kubectl x86_64 1.21.3-0 kubernetes 9.5 M kubelet x86_64 1.21.3-0 kubernetes 20 M Installing for dependencies: conntrack-tools x86_64 1.4.4-7.el7 sl 178 k cri-tools x86_64 1.13.0-0 kubernetes 5.1 M kubernetes-cni x86_64 0.8.7-0 kubernetes 19 M libnetfilter_cthelper x86_64 1.0.0-11.el7 sl 17 k libnetfilter_cttimeout x86_64 1.0.0-7.el7 sl 16 k libnetfilter_queue x86_64 1.0.2-2.el7_2 sl 22 k socat x86_64 1.7.3.2-2.el7 sl 289 k Transaction Summary ==================================================================================================================================== Install 3 Packages (+7 Dependent packages) Total download size: 63 M Installed size: 277 M Downloading packages: (1/10): conntrack-tools-1.4.4-7.el7.x86_64.rpm | 178 kB 00:00:00 warning: /var/cache/yum/x86_64/7/kubernetes/packages/14bfe6e75a9efc8eca3f638eb22c7e2ce759c67f95b43b16fae4ebabde1549f3-cri-tools-1.13.0-0.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 3e1ba8d5: NOKEY Public key for 14bfe6e75a9efc8eca3f638eb22c7e2ce759c67f95b43b16fae4ebabde1549f3-cri-tools-1.13.0-0.x86_64.rpm is not installed (2/10): 14bfe6e75a9efc8eca3f638eb22c7e2ce759c67f95b43b16fae4ebabde1549f3-cri-tools-1.13.0-0.x86_64.rpm | 5.1 MB 00:00:00 (3/10): 23f7e018d7380fc0c11f0a12b7fda8ced07b1c04c4ba1c5f5cd24cd4bdfb304d-kubeadm-1.21.3-0.x86_64.rpm | 9.1 MB 00:00:00 (4/10): b04e5387f5522079ac30ee300657212246b14279e2ca4b58415c7bf1f8c8a8f5-kubectl-1.21.3-0.x86_64.rpm | 9.5 MB 00:00:00 (5/10): libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm | 17 kB 00:00:00 (6/10): socat-1.7.3.2-2.el7.x86_64.rpm | 289 kB 00:00:00 (7/10): libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm | 16 kB 
00:00:00 (8/10): libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm | 22 kB 00:00:00 (9/10): 7e38e980f058e3e43f121c2ba73d60156083d09be0acc2e5581372136ce11a1c-kubelet-1.21.3-0.x86_64.rpm | 20 MB 00:00:00 (10/10): db7cb5cb0b3f6875f54d10f02e625573988e3e91fd4fc5eef0b1876bb18604ad-kubernetes-cni-0.8.7-0.x86_64.rpm | 19 MB 00:00:00 ------------------------------------------------------------------------------------------------------------------------------------ Total 35 MB/s | 63 MB 00:00:01 Retrieving key from https://packages.cloud.google.com/yum/doc/yum-key.gpg Importing GPG key 0x307EA071: Userid : "Rapture Automatic Signing Key (cloud-rapture-signing-key-2021-03-01-08_01_09.pub)" Fingerprint: 7f92 e05b 3109 3bef 5a3c 2d38 feea 9169 307e a071 From : https://packages.cloud.google.com/yum/doc/yum-key.gpg Retrieving key from https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg Importing GPG key 0x3E1BA8D5: Userid : "Google Cloud Packages RPM Signing Key " Fingerprint: 3749 e1ba 95a8 6ce0 5454 6ed2 f09c 394c 3e1b a8d5 From : https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 1/10 Installing : socat-1.7.3.2-2.el7.x86_64 2/10 Installing : libnetfilter_cthelper-1.0.0-11.el7.x86_64 3/10 Installing : cri-tools-1.13.0-0.x86_64 4/10 Installing : libnetfilter_queue-1.0.2-2.el7_2.x86_64 5/10 Installing : conntrack-tools-1.4.4-7.el7.x86_64 6/10 Installing : kubernetes-cni-0.8.7-0.x86_64 7/10 Installing : kubelet-1.21.3-0.x86_64 8/10 Installing : kubectl-1.21.3-0.x86_64 9/10 Installing : kubeadm-1.21.3-0.x86_64 10/10 Verifying : kubectl-1.21.3-0.x86_64 1/10 Verifying : conntrack-tools-1.4.4-7.el7.x86_64 2/10 Verifying : kubernetes-cni-0.8.7-0.x86_64 3/10 Verifying : libnetfilter_queue-1.0.2-2.el7_2.x86_64 4/10 Verifying : cri-tools-1.13.0-0.x86_64 5/10 Verifying : kubeadm-1.21.3-0.x86_64 6/10 Verifying : 
kubelet-1.21.3-0.x86_64 7/10 Verifying : libnetfilter_cthelper-1.0.0-11.el7.x86_64 8/10 Verifying : socat-1.7.3.2-2.el7.x86_64 9/10 Verifying : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 10/10 Installed: kubeadm.x86_64 0:1.21.3-0 kubectl.x86_64 0:1.21.3-0 kubelet.x86_64 0:1.21.3-0 Dependency Installed: conntrack-tools.x86_64 0:1.4.4-7.el7 cri-tools.x86_64 0:1.13.0-0 kubernetes-cni.x86_64 0:0.8.7-0 libnetfilter_cthelper.x86_64 0:1.0.0-11.el7 libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 libnetfilter_queue.x86_64 0:1.0.2-2.el7_2 socat.x86_64 0:1.7.3.2-2.el7 Complete! [root@iut2-slate01 ~]# ===================== # [root@iut2-slate01 ~]# # Enable Kubelet through systemctl. [root@iut2-slate01 ~]# systemctl enable --now kubelet Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service. [root@iut2-slate01 ~]# kubeadm init --pod-network-cidr=192.168.0.0/16 [init] Using Kubernetes version: v1.21.3 [preflight] Running pre-flight checks [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
Please follow the guide at https://kubernetes.io/docs/setup/cri/ [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [iut2-slate01.iu.edu kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 149.165.224.240] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [iut2-slate01.iu.edu localhost] and IPs [149.165.224.240 127.0.0.1 ::1] [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [iut2-slate01.iu.edu localhost] and IPs [149.165.224.240 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file [kubeconfig] Writing "kubelet.conf" kubeconfig file [kubeconfig] Writing "controller-manager.conf" kubeconfig file [kubeconfig] Writing "scheduler.conf" kubeconfig file [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Starting the kubelet [control-plane] Using manifest folder 
"/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s [apiclient] All control plane components are healthy after 19.503233 seconds [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Skipping phase. Please see --upload-certs [mark-control-plane] Marking the node iut2-slate01.iu.edu as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] [mark-control-plane] Marking the node iut2-slate01.iu.edu as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] [bootstrap-token] Using token: ld0pgk.83hdx0139ktah6ml [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to 
a rotatable kubelet client certificate and key [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config Alternatively, if you are the root user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 149.165.224.240:6443 --token ld0pgk.83hdx0139ktah6ml \ --discovery-token-ca-cert-hash sha256:ff59fd3087aeddd1840814d1c59c07304c3a7ccd74e8c7f1bfae8c407607f0ac [root@iut2-slate01 ~]# mkdir -p $HOME/.kube [root@iut2-slate01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config [root@iut2-slate01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config [root@iut2-slate01 ~]# kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml configmap/calico-config created Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created 
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created clusterrole.rbac.authorization.k8s.io/calico-node created clusterrolebinding.rbac.authorization.k8s.io/calico-node created daemonset.apps/calico-node created serviceaccount/calico-node created deployment.apps/calico-kube-controllers created serviceaccount/calico-kube-controllers created [root@iut2-slate01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION iut2-slate01.iu.edu Ready control-plane,master 5h52m v1.21.3 [root@iut2-slate01 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION iut2-slate01.iu.edu Ready control-plane,master 6h1m v1.21.3 [root@iut2-slate01 ~]# kubectl taint nodes --all node-role.kubernetes.io/master- node/iut2-slate01.iu.edu untainted ===================== # Install the metallb load balancer, which is used to set up IP addresses, not for load balancing. # Needed even if there are no worker nodes in the cluster.
# [root@iut2-slate01 ~]# kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.1/manifests/metallb.yaml namespace/metallb-system created Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+ podsecuritypolicy.policy/speaker created serviceaccount/controller created serviceaccount/speaker created clusterrole.rbac.authorization.k8s.io/metallb-system:controller created clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created role.rbac.authorization.k8s.io/config-watcher created clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created rolebinding.rbac.authorization.k8s.io/config-watcher created daemonset.apps/speaker created deployment.apps/controller created [root@iut2-slate01 ~]# cat << EOF > metallb-config.yaml > apiVersion: v1 > kind: ConfigMap > metadata: > namespace: metallb-system > name: config > data: > config: | > address-pools: > - name: default > protocol: layer2 > addresses: > - 149.165.224.242/30 > EOF [root@iut2-slate01 ~]# rm metallb-config.yaml rm: remove regular file ‘metallb-config.yaml’? y [root@iut2-slate01 ~]# cat << EOF > metallb-config.yaml > apiVersion: v1 > kind: ConfigMap > metadata: > namespace: metallb-system > name: config > data: > config: | > address-pools: > - name: default > protocol: layer2 > addresses: > - 149.165.224.242/32 > EOF [root@iut2-slate01 ~]# kubectl apply -f metallb-config.yaml configmap/config created ===================== # Create the cluster! # [root@iut2-slate01 ~]# slate cluster create mwt2-iu-test --group atlas-squid --org "Indiana University" -y Checking NRP-controller status... Installed NRP-Controller tag: latest The version of the nrp-controller is unclear; re-installing it is recommended. Do you want to delete the current version so that a newer one can be installed?
[y]/n: assuming yes Applying https://raw.githubusercontent.com/slateci/slate-client-server/master/resources/federation-deployment.yaml Waiting for the NRP Controller to become active... Creating cluster... NRP Controller is active Ensuring that Custom Resource Definitions are active... CRDs are active Checking for federation ClusterRole... ClusterRole is defined SLATE should be granted access using a ServiceAccount created with a Cluster object by the nrp-controller. Do you want to create such a ServiceAccount automatically now? [y]/n: assuming yes Please enter the name you would like to give the ServiceAccount and core SLATE namespace. The default is 'slate-system': assuming slate-system The namespace 'slate-system' already exists. Proceed with reusing it? [y]/n: assuming yes Waiting for namespace slate-system to become ready... ... Locating ServiceAccount credentials... Extracting CA data... Determining server address... Extracting ServiceAccount token... Done generating config with limited privileges Checking for a LoadBalancer... Found MetalLB Checking for a SLATE ingress controller... SLATE requires an ingress controller to support user-friendly DNS names for HTTP services. SLATE's controller uses a customized ingress class so that it should not conflict with other controllers. Do you want to install the ingress controller now? [y]/n: assuming yes Finding the LoadBalancer address assigned to the ingress controller... Ingress controller address: 149.165.224.242 Sending config to SLATE server... ... ... ... Successfully created cluster mwt2-iu-test with ID cluster_mvO9QzLUj94 Services using Ingress on this cluster can be assigned subdomains within the mwt2-iu-test.slateci.net domain. [root@iut2-slate01 ~]# slate cluster update mwt2-iu-test --location 39.77382,-86.167904 Updating cluster... ... ... 
Successfully updated cluster mwt2-iu-test [root@iut2-slate01 ~]# slate cluster allow-group mwt2-iu-test atlas-squid Successfully granted group atlas-squid access to cluster mwt2-iu-test [root@iut2-slate01 ~]# exit