oc get nodes
NAME STATUS ROLES AGE VERSION
master01 Ready master,worker 2d v1.18.3+012b3ec
master02 Ready master,worker 2d v1.18.3+012b3ec
master03 Ready master,worker 2d v1.18.3+012b3ec
oc adm top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
master01 499m 14% 3235Mi 21%
master02 769m 21% 4933Mi 33%
master03 1016m 29% 6087Mi 40%
oc describe node master01
oc get project #To list the projects
oc get pod -n PROJECT_NAME #To list the pods in a project
NAME READY STATUS RESTARTS AGE
cluster-image-registry-operator-56 2/2 Running 0 13m
oc logs --tail 3 -n openshift-image-registry \
> -c cluster-image-registry-operator \
> cluster-image-registry-operator-564bd5dd8f-s46bz
oc logs --tail 3 -n openshift-image-registry \
> cluster-image-registry-operator-564bd5dd8f-s46bz
oc debug node/NODE_NAME #To start a shell session on the node
chroot /host #To enter local file system of the host
uname -n #To check host name
systemctl status kubelet #To check service status of kubelet
systemctl status cri-o #To check service status of cri-o
q #To exit the pager opened by systemctl status
crictl ps --name openvswitch #To verify openvswitch pod running status from node shell
CONTAINER ID STATE NAME ATTEMPT POD ID
13f0b0ed3497a Running openvswitch 0 4bc278dddf007
oc project PROJECT_NAME #To connect to a project
oc get pod #To list pods in a connected project
oc status #To get the status
oc get events #List all events from current project
skopeo inspect docker://registry.access.redhat.com/rhscl/postgresq-96-rhel7:1 #To find info about container image from events | Image name is incorrect
skopeo inspect docker://registry.access.redhat.com/rhscl/postgresql-96-rhel7:1 #Image name is correct no errors
oc edit deployment/psql #To edit the deployment
image: registry.access.redhat.com/rhscl/postgresql-96-rhel7:1
spec:
replicas: 2 #To set number of replicas of the pod
oc status
oc get pods
Pod should be up and running
Storage:
oc get project #To list the projects
oc new-project install-storage #To create a new project
oc get storageclass #To list the default storage class
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storage (default) nfs-storage-provisioner Delete Immediate false 37
oc new-app --name postgresql-persistent \
> --docker-image registry.redhat.io/rhel8/postgresql-12:1-43 \
> -e POSTGRESQL_USER=redhat \
> -e POSTGRESQL_PASSWORD=redhat123 \
> -e POSTGRESQL_DATABASE=persistentdb
oc status
oc get pods -o wide
oc set volumes deployment/postgresql-persistent \
> --add --name postgresql-storage --type pvc --claim-class nfs-storage \
> --claim-mode rwo --claim-size 10Gi --mount-path /var/lib/pgsql \
> --claim-name postgresql-storage
oc get pvc #To list PVC
NAME STATUS CAPACITY ACCESS MODES STORAGECLASS AGE
postgresql-storage Bound 10Gi RWO nfs-storage 25s
#To list the PV
oc get pv \
> -o custom-columns=NAME:.metadata.name,CLAIM:.spec.claimRef.name
NAME CLAIM
pvc-26cc804a-4ec2-4f52-b6e5-84404b4b9def image-registry-storage
pvc-65c3cce7-45eb-482d-badf-a6469640bd75 postgresql-storage
oc delete all -l app=postgresql-persistent #To delete resources that contain the app=postgresql-persistent label
#To add existing volume
oc set volumes \
> deployment/postgresql-persistent2 \
> --add --name postgresql-storage --type pvc \
> --claim-name postgresql-storage --mount-path /var/lib/pgsql
oc delete pvc/postgresql-storage #To delete pvc
#List all cluster role bindings that reference the self-provisioner cluster role
oc get clusterrolebinding -o wide \
> | grep -E 'NAME|self-provisioner'
oc describe clusterrolebindings self-provisioners
#Remove the self-provisioner cluster role from the system:authenticated:oauth virtual group
oc adm policy remove-cluster-role-from-group \
> self-provisioner system:authenticated:oauth
oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth
oc describe clusterrolebindings self-provisioners #Role has been removed from the group. The cluster role binding self-provisioners should not exist.
oc get clusterrolebinding -o wide \
> | grep -E 'NAME|self-provisioner'
oc get clusterrolebinding -o wide | grep -E 'NAME|self-provisioner'