Monday, 6 September 2021

OCP4 OC Commands


oc login -u USER_NAME -p PASSWORD  https://api.ocp4.example.com:6443
oc get nodes

NAME       STATUS   ROLES           AGE   VERSION
master01   Ready    master,worker   2d    v1.18.3+012b3ec
master02   Ready    master,worker   2d    v1.18.3+012b3ec
master03   Ready    master,worker   2d    v1.18.3+012b3ec
oc adm top node
NAME       CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master01   499m         14%    3235Mi          21%
master02   769m         21%    4933Mi          33%
master03   1016m        29%    6087Mi          40%
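
The same view is available per pod; a quick sketch (the project name here is just an example):
oc adm top pods -n openshift-image-registry #To check CPU and memory usage of the pods in a project
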
oc describe node master01
oc get project                          #To list the projects
oc get pod -n PROJECT_NAME      #To list the pods in a project
NAME                                 READY   STATUS    RESTARTS   AGE
cluster-image-registry-operator-56   2/2     Running   0          13m

#To get container logs when the pod has more than one container (READY greater than 1/1) | A container name must be specified with -c
oc logs --tail 3 -n openshift-image-registry \
>    -c cluster-image-registry-operator \
>    cluster-image-registry-operator-564bd5dd8f-s46bz

#To get container logs when the pod has a single container (READY is 1/1)
oc logs --tail 3 -n openshift-image-registry \
>    cluster-image-registry-operator-564bd5dd8f-s46bz
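
To stream logs rather than tail a fixed count, -f follows the output; a sketch against the same pod:
oc logs -f -n openshift-image-registry cluster-image-registry-operator-564bd5dd8f-s46bz #Press Ctrl+C to stop following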

oc adm node-logs --tail 1 -u kubelet NODE_NAME  #To get kubelet logs from a node
oc debug node/NODE_NAME #To start a shell session on the node
chroot /host                     #To enter the local file system of the host
uname -n                         #To check the host name
systemctl status kubelet         #To check the service status of the kubelet
systemctl status cri-o           #To check the service status of CRI-O
q                                #To quit the systemctl status pager


crictl ps --name openvswitch #To verify from the node shell that the openvswitch container is running
CONTAINER ID    STATE     NAME          ATTEMPT   POD ID
13f0b0ed3497a   Running   openvswitch   0         4bc278dddf007
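
From the same node shell, crictl can also fetch a container's logs by the CONTAINER ID shown above (a sketch using the sample ID):
crictl logs --tail 3 13f0b0ed3497a #To view recent container logs directly on the node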

oc project PROJECT_NAME #To connect to a project
oc get pod #To list pods in a connected project
oc status #To get an overview of the current project
oc get events #To list all events from the current project
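
Events are not sorted by default; ordering them by timestamp usually helps (a sketch):
oc get events --sort-by='.lastTimestamp' #To list events ordered by time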

skopeo inspect docker://registry.access.redhat.com/rhscl/postgresq-96-rhel7:1  #To inspect a container image referenced in the events | This image name is incorrect (postgresq), so the inspection fails
skopeo inspect docker://registry.access.redhat.com/rhscl/postgresql-96-rhel7:1 #Correct image name; inspection succeeds with no errors
oc edit deployment/psql #To edit the deployment

image: registry.access.redhat.com/rhscl/postgresql-96-rhel7:1 #Corrected image reference
spec:
  replicas: 2 #To set the number of replicas of the pod
oc status
oc get pods 
The pods should now be up and running.
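
As a non-interactive alternative to editing the replica count, oc scale does the same thing (a sketch against the psql deployment above):
oc scale deployment/psql --replicas=2 #To scale without opening an editor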

Storage:


oc get project #To list projects
oc new-project install-storage #To create a new project
oc get storageclass #To list storage classes (the default class is marked)

NAME                    PROVISIONER               RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-storage (default)   nfs-storage-provisioner   Delete          Immediate           false                  37

oc new-app --name postgresql-persistent \
> --docker-image registry.redhat.io/rhel8/postgresql-12:1-43 \
> -e POSTGRESQL_USER=redhat \
> -e POSTGRESQL_PASSWORD=redhat123 \
> -e POSTGRESQL_DATABASE=persistentdb
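
To confirm the environment variables were applied, oc set env can list them without making changes (a sketch):
oc set env deployment/postgresql-persistent --list #To print the configured environment variables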

oc status
oc get pods -o wide

oc set volumes deployment/postgresql-persistent \
> --add --name postgresql-storage --type pvc --claim-class nfs-storage \
> --claim-mode rwo --claim-size 10Gi --mount-path /var/lib/pgsql \
> --claim-name postgresql-storage

oc get pvc #To list PVC 

NAME                 STATUS   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
postgresql-storage   Bound    10Gi       RWO            nfs-storage    25s
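
For more detail on a claim (bound volume, access modes, events), describe it; a sketch using the claim above:
oc describe pvc postgresql-storage #To inspect PVC details and related events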

#To list the PVs and their claims
oc get pv \
> -o custom-columns=NAME:.metadata.name,CLAIM:.spec.claimRef.name

NAME                                       CLAIM
pvc-26cc804a-4ec2-4f52-b6e5-84404b4b9def   image-registry-storage
pvc-65c3cce7-45eb-482d-badf-a6469640bd75   postgresql-storage

oc delete all -l app=postgresql-persistent    #To delete resources that contain the app=postgresql-persistent label
#To add an existing volume to another deployment
oc set volumes \
> deployment/postgresql-persistent2 \
> --add --name postgresql-storage --type pvc \
> --claim-name postgresql-storage --mount-path /var/lib/pgsql
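
Running oc set volumes without --add simply lists the volumes on a deployment, a handy check after attaching the claim (a sketch):
oc set volumes deployment/postgresql-persistent2 #To list the volumes attached to the deployment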


oc delete pvc/postgresql-storage #To delete pvc



#To list all cluster role bindings that reference the self-provisioner cluster role
oc get clusterrolebinding -o wide \
> | grep -E 'NAME|self-provisioner'

oc describe clusterrolebindings self-provisioners

#To remove the self-provisioner cluster role from the system:authenticated:oauth virtual group
oc adm policy remove-cluster-role-from-group \
> self-provisioner system:authenticated:oauth


oc describe clusterrolebindings self-provisioners #The role has been removed from the group; the self-provisioners cluster role binding should no longer exist


#To verify that the cluster role binding is gone
oc get clusterrolebinding -o wide \
> | grep -E 'NAME|self-provisioner'
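
If project self-provisioning ever needs to be restored, the inverse command adds the role back (a sketch):
oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth #To restore self-provisioning for authenticated users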


oc get users #To list user resources
oc get identity #To list identity resources
oc edit oauth cluster #To edit the OAuth custom resource that configures identity providers
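
Inside the editor, the HTPasswd identity provider goes under spec.identityProviders; a minimal sketch, assuming the provider name my_htpasswd_provider and the htpasswd-secret created below:

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
  - name: my_htpasswd_provider #Provider name; it prefixes identity resources (see below)
    mappingMethod: claim
    type: HTPasswd
    htpasswd:
      fileData:
        name: htpasswd-secret #Secret in openshift-config that holds the htpasswd file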

htpasswd -c -B -b /tmp/htpasswd student Studentpassword #To create an htpasswd file | Use -c only when creating a new file; it replaces all content if the file already exists

htpasswd -b /tmp/htpasswd student Studentpassword #To add or update credentials
htpasswd -D /tmp/htpasswd student #To delete credentials
#Creating the HTPasswd Secret
oc create secret generic htpasswd-secret \
> --from-file htpasswd=/tmp/htpasswd -n openshift-config
#Extracting Secret Data
oc extract secret/htpasswd-secret -n openshift-config \
> --to /tmp/ --confirm
#Updating the HTPasswd Secret
oc set data secret/htpasswd-secret \
> --from-file htpasswd=/tmp/htpasswd -n openshift-config

#After updating the secret, the OAuth operator redeploys the pods in the openshift-authentication namespace
watch oc get pods -n openshift-authentication

#Deleting Users and Identities

htpasswd -D /tmp/htpasswd manager #To delete the user from the htpasswd file
#Update the secret to remove all remnants of the user's password.
oc set data secret/htpasswd-secret \
> --from-file htpasswd=/tmp/htpasswd -n openshift-config

oc delete user manager #Remove the user resource
oc get identities | grep manager #Identity resources include the name of the identity provider.
oc delete identity my_htpasswd_provider:manager #To delete the identity resource

#Assigning Administrative Privileges

oc adm policy add-cluster-role-to-user cluster-admin student #Assign the cluster-admin role to the student user
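
To verify the grant took effect, oc auth can-i checks a permission for the current user (a sketch; run it while logged in as student):
oc auth can-i delete nodes #Should print 'yes' for a cluster-admin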


#Cleanup: delete the identity provider secret (here named localusers), then all users and identities
oc delete secret localusers -n openshift-config
oc delete user --all
oc delete identity --all
