Thursday, May 28, 2015

Partitioning an existing table in Oracle with exchange and split

Create a partitioned table from a non-partitioned table



Non-partitioned table: ZZT_TEST. Give it a primary key backed by a unique index; the partitioned table must end up with a matching structure and key for the exchange to succeed.


CREATE UNIQUE INDEX SVADAPALLI.ZIP_SERVICELOG1 ON SVADAPALLI.ZZT_TEST
(HOST_NAME, PORT, CONTEXT_ID);

ALTER TABLE SVADAPALLI.ZZT_TEST ADD (
  CONSTRAINT ZIP_SERVICELOG1
  PRIMARY KEY
  (HOST_NAME, PORT, CONTEXT_ID)
  USING INDEX SVADAPALLI.ZIP_SERVICELOG1
  ENABLE VALIDATE);

  Create an empty partitioned table with the same column structure:

  CREATE TABLE SVADAPALLI.ZZT_SERVICE_LOG
(
  SERVICE_PATH_NAME     VARCHAR2(200 BYTE)      NOT NULL,
  SERVICE_NAME          VARCHAR2(50 BYTE)       NOT NULL,
  SUBSERVICE_PATH_NAME  VARCHAR2(200 BYTE),
  SENDER                VARCHAR2(50 BYTE),
  RECORD_COUNT          NUMBER(30),
  DATA_SIZE             NUMBER(30),
  STATUS                VARCHAR2(10 BYTE),
  INSTANCE_TYPE         VARCHAR2(10 BYTE),
  START_DATE_TIME       VARCHAR2(20 BYTE)       NOT NULL,
  END_DATE_TIME         VARCHAR2(20 BYTE)       NOT NULL,
  USER_INFO             VARCHAR2(100 BYTE),
  SERVICE_INFO          VARCHAR2(100 BYTE),
  HOST_NAME             VARCHAR2(50 BYTE)       NOT NULL,
  PORT                  VARCHAR2(10 BYTE)       NOT NULL,
  USER_DATA             CLOB                    DEFAULT EMPTY_CLOB(),
  ERROR_MESSAGE         VARCHAR2(4000 BYTE),
  CONTEXT_ID            VARCHAR2(50 BYTE)       NOT NULL,
  PARENT_CONTEXT_ID     VARCHAR2(50 BYTE),
  ROOT_CONTEXT_ID       VARCHAR2(50 BYTE),
  LOG_DATA              CHAR(1 BYTE)            DEFAULT 'N',
  CREATE_DATE           VARCHAR2(20 BYTE)       DEFAULT SYSDATE               NOT NULL
)
LOB (USER_DATA) STORE AS LOBSEGMENT_SERVICE_LOG (
  TABLESPACE  HMA_TM_PROD_GEN2_MONITOR_DATA
  ENABLE      STORAGE IN ROW
  CHUNK       32768
  PCTVERSION  0
  NOCACHE
  LOGGING)
NOCOMPRESS
TABLESPACE HMA_TM_PROD_GEN2_MONITOR_DATA
RESULT_CACHE (MODE DEFAULT)
PCTUSED    0
PCTFREE    10
INITRANS   1
MAXTRANS   255
STORAGE    (
            BUFFER_POOL      DEFAULT
            FLASH_CACHE      DEFAULT
            CELL_FLASH_CACHE DEFAULT
           )
LOGGING
PARTITION BY RANGE (CREATE_DATE)
(
  PARTITION ZZT_SERVICE_LOG_2016MAX VALUES LESS THAN (MAXVALUE)
    LOGGING
    NOCOMPRESS
    TABLESPACE USERS
    LOB (USER_DATA) STORE AS (
      TABLESPACE  USERS
      ENABLE      STORAGE IN ROW
      CHUNK       8192
      RETENTION
      NOCACHE
      LOGGING
          STORAGE    (
                      INITIAL          64K
                      NEXT             1M
                      MINEXTENTS       1
                      MAXEXTENTS       UNLIMITED
                      PCTINCREASE      0
                      BUFFER_POOL      DEFAULT
                      FLASH_CACHE      DEFAULT
                      CELL_FLASH_CACHE DEFAULT
                     ))
    PCTFREE    10
    INITRANS   1
    MAXTRANS   255
    STORAGE    (
                INITIAL          64K
                NEXT             1M
                MAXSIZE          UNLIMITED
                MINEXTENTS       1
                MAXEXTENTS       UNLIMITED
                BUFFER_POOL      DEFAULT
                FLASH_CACHE      DEFAULT
                CELL_FLASH_CACHE DEFAULT
               )
)
NOCACHE
NOPARALLEL
MONITORING
ENABLE ROW MOVEMENT;


CREATE UNIQUE INDEX SVADAPALLI.ZIP_SERVICELOG ON SVADAPALLI.ZZT_SERVICE_LOG
(HOST_NAME, PORT, CONTEXT_ID)
LOGGING
TABLESPACE HMA_TM_PROD_GEN2_MONITOR_INDEX
PCTFREE    10
INITRANS   2
MAXTRANS   255
STORAGE    (
            INITIAL          64K
            NEXT             1M
            MAXSIZE          UNLIMITED
            MINEXTENTS       1
            MAXEXTENTS       UNLIMITED
            PCTINCREASE      0
            BUFFER_POOL      DEFAULT
            FLASH_CACHE      DEFAULT
            CELL_FLASH_CACHE DEFAULT
           )
NOPARALLEL;


CREATE INDEX SVADAPALLI.ZX4_SERVICE_LOG ON SVADAPALLI.ZZT_SERVICE_LOG
(CREATE_DATE)
LOGGING
TABLESPACE HMA_TM_PROD_GEN2_MONITOR_DATA
PCTFREE    10
INITRANS   2
MAXTRANS   255
STORAGE    (
            INITIAL          64K
            NEXT             1M
            MAXSIZE          UNLIMITED
            MINEXTENTS       1
            MAXEXTENTS       UNLIMITED
            PCTINCREASE      0
            BUFFER_POOL      DEFAULT
            FLASH_CACHE      DEFAULT
            CELL_FLASH_CACHE DEFAULT
           )
NOPARALLEL;


CREATE INDEX SVADAPALLI.ZX1_SERVICE_LOG ON SVADAPALLI.ZZT_SERVICE_LOG
(ROOT_CONTEXT_ID)
LOGGING
TABLESPACE HMA_TM_PROD_GEN2_MONITOR_INDEX
PCTFREE    10
INITRANS   2
MAXTRANS   255
STORAGE    (
            INITIAL          64K
            NEXT             1M
            MAXSIZE          UNLIMITED
            MINEXTENTS       1
            MAXEXTENTS       UNLIMITED
            PCTINCREASE      0
            BUFFER_POOL      DEFAULT
            FLASH_CACHE      DEFAULT
            CELL_FLASH_CACHE DEFAULT
           )
NOPARALLEL;



ALTER TABLE SVADAPALLI.ZZT_SERVICE_LOG ADD (
  CONSTRAINT ZIP_SERVICELOG
  PRIMARY KEY
  (HOST_NAME, PORT, CONTEXT_ID)
  USING INDEX SVADAPALLI.ZIP_SERVICELOG
  ENABLE VALIDATE);

Verify the table qualifies for online redefinition (the procedure raises an error if it does not):

EXEC DBMS_REDEFINITION.can_redef_table('svadapalli','zzt_test');
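
The same check as an anonymous block, naming the key option explicitly (CONS_USE_PK is the standard DBMS_REDEFINITION constant; the call raises an error if the table does not qualify):

SET SERVEROUTPUT ON
BEGIN
  -- raises an exception if ZZT_TEST cannot be redefined using its primary key
  DBMS_REDEFINITION.CAN_REDEF_TABLE('SVADAPALLI', 'ZZT_TEST',
                                    DBMS_REDEFINITION.CONS_USE_PK);
  DBMS_OUTPUT.PUT_LINE('ZZT_TEST can be redefined online.');
END;
/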


  Once the structures and primary keys match, you can exchange the partition:


  ALTER TABLE zzt_service_log
  EXCHANGE PARTITION zzt_service_log_2016max
  WITH TABLE zzt_test
  WITHOUT VALIDATION
  UPDATE GLOBAL INDEXES;
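
A quick sanity check after the exchange: the segments swap, so the max partition should now hold the old table's rows and ZZT_TEST should be empty.

SELECT COUNT(*) FROM zzt_service_log PARTITION (zzt_service_log_2016max);
SELECT COUNT(*) FROM zzt_test;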



  After the exchange you can split the max partition to create new partitions (see the SPLIT PARTITION documentation).



Example of splitting a range-date partition out of the max partition:

ALTER TABLE SVADAPALLI.SUB_EVENT_MASTER_SALES_CLOSE
  SPLIT PARTITION PART_MAXI
  AT (TO_DATE(' 2012-01-31 23:59:59', 'SYYYY-MM-DD HH24:MI:SS', 'NLS_CALENDAR=GREGORIAN'))
  INTO (PARTITION JAN_2012 TABLESPACE HMA_TM_PROD_DATA,
        PARTITION PART_MAXI)
  UPDATE GLOBAL INDEXES;
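
Applied to the table above, where CREATE_DATE is a VARCHAR2 holding date strings, the boundary is a plain string literal (the new partition name and boundary date here are illustrative):

ALTER TABLE SVADAPALLI.ZZT_SERVICE_LOG
  SPLIT PARTITION ZZT_SERVICE_LOG_2016MAX AT ('2015-06-01 00:00:00')
  INTO (PARTITION ZZT_SERVICE_LOG_2015H1 TABLESPACE HMA_TM_PROD_GEN2_MONITOR_DATA,
        PARTITION ZZT_SERVICE_LOG_2016MAX)
  UPDATE GLOBAL INDEXES;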




Thursday, May 21, 2015

Change a data file name or location in Oracle

First take the tablespace offline

alter tablespace "HMA_TM_PROD_GEN2_DATA" offline normal;

Copy, move, or rename the datafile at the OS level:

mv -i /oracle/oradata1/hma_tm_prod_gen2_data_03.dbf /oracle/oradata1/shsub/hma_tm_prod_gen2_data_03.dbf
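
Point the control file at the new location before bringing the tablespace online (otherwise Oracle will still look for the old path):

alter tablespace "HMA_TM_PROD_GEN2_DATA" rename datafile
  '/oracle/oradata1/hma_tm_prod_gen2_data_03.dbf'
  to '/oracle/oradata1/shsub/hma_tm_prod_gen2_data_03.dbf';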

Bring the tablespace back online:
alter tablespace "HMA_TM_PROD_GEN2_DATA" online;

Enjoy!

Wednesday, May 20, 2015

expdp backup script: compress and clear files older than 7 days

oracle@shsub:/opt/oradiag/diag/rdbms/shsub/shsub/trace>cat /export/home/oracle/scripts/bin/expdp_full_all_DBs.ksh
#!/bin/ksh
set -x
# Script to do an export datapump of database, compress it, and delete dumps older than 7 days
. ~oracle/.profile

#/ora_share_nfs/scripts/expdp_full.ksh shsub /opt/app/oracle/product/11.2.0/11.2.0.4 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/backup/shsub
#/ora_share_nfs/scripts/expdp_full.ksh shwm  /opt/app/oracle/product/11.2.0/11.2.0.4 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/backup/shwm

/ora_share_nfs/scripts/expdp_full_audit_and_wallet.ksh shsub /opt/app/oracle/product/11.2.0/11.2.0.4 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/hmaissubdb01/shsub
/ora_share_nfs/scripts/expdp_full_audit_and_wallet.ksh shwm  /opt/app/oracle/product/11.2.0/11.2.0.4 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/hmaissubdb01/shwm

oracle@shsub:/opt/oradiag/diag/rdbms/shsub/shsub/trace>cat /ora_share_nfs/scripts/expdp_full.ksh
#!/bin/ksh
#set -x
#                               Usage:
#                               /export/home/oracle/scripts/bin/expdp_full.ksh
#                               parameters $ORACLE_SID $ORACLE_HOME EXPDP_DIR NO_PARALLEL FILESIZE RETENTION DEST_DIR
#
#                               Sample
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms  /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/dktms
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms2 /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/dktms2
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms3 /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/dktms3
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms4 /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/dktms4
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms  /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/sktms
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms3 /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/sktms3
#                               /export/home/oracle/scripts/bin/expdp_full.ksh dktms4 /opt/app/oracle/product/11.2.0/11.2.0.3 EXPDP_DUMPDIR 4 5G 7 /ora_share_nfs/kmaistmsdb/sktms4
#
. ~oracle/.profile
# Script to do an export datapump of database, compress it, and delete dumps older than 7 days

# Variables go here. Change according to environment and needs

RUN_DATE=`date "+%m%d%Y_%H%M%S"`

export ORACLE_SID=$1
export ORACLE_HOME=$2

export EXPDP_DIR=$3
export NO_PARALLEL=$4
export FILESIZE=$5
export RETENTION=$6
export DEST_DIR=$7
export HOSTNAME=`hostname`

export LOG_FILE_01=/export/home/oracle/scripts/logs/${ORACLE_SID}_expdp_full_01.log
export LOG_FILE_02=/export/home/oracle/scripts/logs/${ORACLE_SID}_expdp_full_02.log

echo "parameters: " $ORACLE_SID $ORACLE_HOME $EXPDP_DIR $NO_PARALLEL $FILESIZE $RETENTION $DEST_DIR

MAIL_TO='his-dba-alerts@hke.local'

# End of variables

#       Remove old dmp files (generated manually or left behind by a previous failed job)

ls $DEST_DIR/*full*dmp

ls $DEST_DIR/*full*dmp > $LOG_FILE_01
#grep "No such file or directory" $LOG_FILE_01 > $LOG_FILE_02
#grep "dmp" $LOG_FILE_01 > $LOG_FILE_02
if [ -s $LOG_FILE_01 ];then
        rm $DEST_DIR/*full*dmp
fi

if [ ${NO_PARALLEL} -eq 0 ]; then
#       expdp \"\/ as sysdba\" full=y directory=${EXPDP_DIR} dumpfile=${ORACLE_SID}_full_${RUN_DATE}_%U.dmp filesize=${FILESIZE} logfile=${ORACLE_SID}_full_${RUN_DATE}.log compression=all EXCLUDE=STATISTICS
        $ORACLE_HOME/bin/expdp \"\/ as sysdba\" full=y directory=${EXPDP_DIR} dumpfile=${ORACLE_SID}_full_${RUN_DATE}_%U.dmp filesize=${FILESIZE} logfile=${ORACLE_SID}_full_${RUN_DATE}.log EXCLUDE=STATISTICS
else
#       expdp \"\/ as sysdba\" full=y directory=${EXPDP_DIR} dumpfile=${ORACLE_SID}_full_${RUN_DATE}_%U.dmp parallel=${NO_PARALLEL} filesize=${FILESIZE} logfile=${ORACLE_SID}_full_${RUN_DATE}.log compression=all EXCLUDE=STATISTICS
        $ORACLE_HOME/bin/expdp \"\/ as sysdba\" full=y directory=${EXPDP_DIR} dumpfile=${ORACLE_SID}_full_${RUN_DATE}_%U.dmp parallel=${NO_PARALLEL} filesize=${FILESIZE} logfile=${ORACLE_SID}_full_${RUN_DATE}.log EXCLUDE=STATISTICS
fi

EXPDP_STATUS=$?
echo
echo "EXPDP_STATUS: " $EXPDP_STATUS
echo
if [ ${EXPDP_STATUS} -eq 0 ]; then
        sqlplus / as sysdba <<EOF
                --      Backup spfile by create pfile from spfile.
                create pfile='${DEST_DIR}/init${ORACLE_SID}_${RUN_DATE}.ora' FROM spfile;
EOF
        #       Archive and compress backup
        cd ${DEST_DIR}
        tar cEvf ${ORACLE_SID}_full_${RUN_DATE}.tar ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}_*.dmp
        TAR_STATUS=$?
        echo
        echo "TAR_STATUS: " $TAR_STATUS
        echo
        if [ ${TAR_STATUS} -eq 0 ]; then
                gzip ${ORACLE_SID}_full_${RUN_DATE}.tar
                GZIP_STATUS=$?
                echo
                echo "GZIP_STATUS: " $GZIP_STATUS
                echo
                if [ ${GZIP_STATUS} -eq 0 ]; then
                #bzip2 ${ORACLE_SID}_full_${RUN_DATE}.tar
                #BZIP2_STATUS=$?
                #if [ ${BZIP2_STATUS} -eq 0 ]; then
                        #Delete original dumps
                        rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}_*.dmp
                        find ${DEST_DIR}/${ORACLE_SID}_full_* -mtime +${RETENTION} -exec rm -f {} \;
                        find ${DEST_DIR}/init${ORACLE_SID}_*.ora -mtime +${RETENTION} -exec rm -f {} \;
                        echo "${ORACLE_SID}@$HOSTNAME: full export succeeded." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: SUCCESS" ${MAIL_TO}
                else
                        #Do a cleanup
                        rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}_*.dmp
                        rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}.tar
                        echo "${ORACLE_SID}@$HOSTNAME: full export failed by gzip command." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO}
                        #echo "${ORACLE_SID}@$HOSTNAME: full export failed by gzip command." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO} < $DEST_DIR/${ORACLE_SID}_full_${RUN_DATE}.log
                fi
        else
                #Do a cleanup
                rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}_*.dmp
                rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}.tar
                rm ${DEST_DIR}/${ORACLE_SID}_full_${RUN_DATE}.tar.gz
                echo "${ORACLE_SID}@$HOSTNAME: full export failed by tar command." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO}
                #echo "${ORACLE_SID}@$HOSTNAME: full export failed by tar command." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO} < $DEST_DIR/${ORACLE_SID}_full_${RUN_DATE}.log
        fi
else
        echo "${ORACLE_SID}@$HOSTNAME: full export failed." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO}
        #echo "${ORACLE_SID}@$HOSTNAME: full export failed." | mailx -s "${ORACLE_SID}@$HOSTNAME: Backup by expdp Notification: FAILED" ${MAIL_TO} < $DEST_DIR/${ORACLE_SID}_full_${RUN_DATE}.log
fi
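
To schedule this nightly, a crontab entry along these lines works (the 02:00 run time is just an example; the log directory matches the one already used in the script):

0 2 * * * /export/home/oracle/scripts/bin/expdp_full_all_DBs.ksh > /export/home/oracle/scripts/logs/expdp_full_all_DBs.cron.log 2>&1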

Friday, May 15, 2015

Using an INDEX hint in Oracle

If you run the query from a different schema, reference the table through an alias in the hint.

In the example below the alias is temp:

select /*+index(temp temp_idx_01) */ * from hma_tm_prod.ddx temp where vin like '%123454';


If you run it from the same schema, you can use the table name directly:

select /*+index(ddx temp_idx_01) */ * from ddx where vin like '%123454';
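
To confirm the hint was honored, check the execution plan; DBMS_XPLAN is the standard tool:

EXPLAIN PLAN FOR
select /*+index(temp temp_idx_01) */ * from hma_tm_prod.ddx temp where vin like '%123454';

SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY);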






Thursday, May 7, 2015

Oracle RAC Upgrade 11.2.0.1 to 11.2.0.4

Upgrading Oracle RAC from 11.2.0.1 to 11.2.0.4

Stop the database.
Stop CRS.

Prerequisite patch application


First apply patch 9706490

This patch is a prerequisite for the upgrade from 11.2.0.1 to 11.2.0.4.

oracle:srvctl stop database -d cool
root:crsctl stop cluster -all

[root@rac1 9706490]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -unlock

Patching crs home first

[oracle@rac1 ~]$ export PATH=/u01/app/11.2.0/grid/OPatch:$PATH
[oracle@rac1 ~]$ which opatch
/u01/app/11.2.0/grid/OPatch/opatch
[oracle@rac1 ~]$ echo $ORACLE_HOME
/u01/app/oracle/product/11.2.0/dbhome_1
[oracle@rac1 ~]$ export ORACLE_HOME=/u01/app/11.2.0/grid/
[oracle@rac1 ~]$ echo $ORACLE_HOME
/u01/app/11.2.0/grid/
[oracle@rac1 ~]$ cd /u01
[oracle@rac1 u01]$ cd 9706490/
[oracle@rac1 9706490]$ opatch napply -local -oh /u01/app/11.2.0/grid/ -id 9706490

Once opatch succeeds, patch the DB home:

[oracle@rac1 9706490]$ cd custom/scripts
[oracle@rac1 scripts]$ . oraenv
ORACLE_SID = [orcl] ? cool1
The Oracle base for ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1 is /u01/app/oracle
[oracle@rac1 scripts]$ echo $ORACLE_HOME
/u01/app/oracle/product/11.2.0/dbhome_1
[oracle@rac1 scripts]$ sh prepatch.sh -dbhome /u01/app/oracle/product/11.2.0/dbhome_1
prepatch.sh completed successfully.
[oracle@rac1 scripts]$ cd ../..
[oracle@rac1 9706490]$ opatch napply custom/server/ -local -oh /u01/app/oracle/product/11.2.0/dbhome_1 -id 9706490

After success, repeat the same steps on node 2.


[oracle@rac2 9706490]$ cd custom/scripts
[oracle@rac2 scripts]$ sh postpatch.sh -dbhome /u01/app/oracle/product/11.2.0/dbhome_1
[root@rac1]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -patch
[root@rac2]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -patch


***************************************************************************

GRID SOFTWARE UPGRADE


I did not shut down CRS or the database on any node for this upgrade; your mileage may vary.

Unzip the grid software and run runInstaller.
Select "Upgrade an existing Grid Infrastructure installation".
Create a new grid home folder on the 2 nodes.
Give the new home path in the installer and follow the on-screen instructions.
Fix any precheck failures, or ignore the benign ones.
Run rootupgrade.sh on node1 and then on node2, as sketched below.
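
The root upgrade script lives in the new grid home and must run as root, one node at a time (the 11.2.0.4 home path below is illustrative):

# as root, first on node1, then on node2
/u01/app/11.2.0.4/grid/rootupgrade.sh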

The last message you should see is:

Configure Oracle Grid Infrastructure for a Cluster ... succeeded


That's it.

Verify:
[root@rac1 app]# crsctl query crs activeversion
Oracle Clusterware active version on the cluster is [11.2.0.4.0]
Enjoy new clusterware version 11.2.0.4 :)

********************************************************************************

DATABASE SOFTWARE UPGRADE


Now upgrade the database software version.

Create a new Oracle home for the database using the GUI:

cd database
runInstaller

Follow the on-screen instructions and choose "Install database software only", then run the root scripts.

This completes the installation of the Oracle database software.

**************************************************************************

Existing DB Upgrade


Now upgrade the existing database.

Go to the 11.2.0.4 Oracle home's bin directory.

Run dbua and follow the on-screen instructions.
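
A minimal invocation (the new 11.2.0.4 home path below is an assumption; adjust to your install):

# point at the new 11.2.0.4 home, then launch the upgrade assistant
export ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/dbhome_1
$ORACLE_HOME/bin/dbua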


After completion, verify with:

select banner from v$version;
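
You should see the new release in the banner, something like the following (edition and platform will vary):

BANNER
--------------------------------------------------------------------------------
Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production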

Enjoy :)

Troubleshooting the agent in OEM 12c

To find the last metric collection (upload) time per target on a host, execute the following query in the repository database:

select mt.target_name,
       (select max(mmc.collection_timestamp)
          from sysman.mgmt$metric_current mmc
         where mmc.target_name = mt.target_name) last_collection
  from sysman.mgmt$target mt
 where host_name = 'hmaiptmsdb2'
 group by mt.target_name
 order by last_collection;
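
If a target shows a stale last_collection, the usual next step is on the agent host itself: check the agent and force an upload (run from the agent home's bin directory):

emctl status agent
emctl upload agent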

Wednesday, May 6, 2015

Patching in RAC with opatch auto and manual steps

******************************** test1: ********************************
--------------------------------------------------------
su - root
cd /opt/app/crs/11.2.0.3/
mv OPatch OPatch_bak
unzip latest OPatch as root
chown -R oracle:dba OPatch
exit
cd /export/home/oracle/patch/
unzip patch 19440385 to the above folder
cd /opt/app/oracle/product/11.2.0.3/
mv OPatch OPatch_bak
unzip latest OPatch
cd /opt/app/crs/11.2.0.3/OPatch/ocm/bin
./emocmrsp
--provide email id and password for metalink
--check for ocm.rsp file created

###################### GRID+DB Patching test1 Start #######################

***********(scenario-1)***********
su - root
cd /opt/app/crs/11.2.0.3/OPatch
./opatch auto /export/home/oracle/patch/ -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local
exit
--verify logs for any failures

***********(scenario-2)***********
srvctl stop instance -d test -i test1

su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl stop crs
exit root

-- grid patch to grid home - 17592127 is grid patch id
cd /export/home/oracle/patch/
export GRID_HOME=/opt/app/crs/11.2.0.3
export ORACLE_HOME=$GRID_HOME
/opt/app/crs/11.2.0.3/OPatch/opatch napply -id 17592127 -oh /opt/app/crs/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

--grid patch to db home - 17592127 is grid patch id
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
cd /export/home/oracle/patch/17592127/custom/server/
/opt/app/crs/11.2.0.3/OPatch/opatch napply -id 17592127 -oh /opt/app/oracle/product/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

--database patch to database home - 19121548 is db PSU patch id
cd /export/home/oracle/patch/
/opt/app/oracle/product/11.2.0.3/OPatch/opatch napply -id 19121548 -oh /opt/app/oracle/product/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

cd /opt/app/oracle/product/11.2.0.3/OPatch

--check grid patch updates
export ORACLE_HOME=/opt/app/crs/11.2.0.3
./opatch lsinventory

--check Database patch updates
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
./opatch lsinventory

su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl start crs
--after 5 mins check crs status, all 4 services should be online
crsctl check crs
crsctl stat res -t
exit root

srvctl start instance -d test -i test1

######################## GRID+DB Patching test1 Stop ##########################




******************************** test2: ********************************
--------------------------------------------------------
cd /export/home/oracle/patch/
unzip patch 19440385 to the above folder
cd /opt/app/oracle/product/11.2.0.3/
mv OPatch OPatch_bak
unzip latest OPatch
cd /opt/app/crs/11.2.0.3/
mv OPatch OPatch_bak
unzip latest OPatch
cd /opt/app/crs/11.2.0.3/OPatch/ocm/bin
./emocmrsp
--provide email id and password for metalink
--check for ocm.rsp file created

###################### GRID+DB Patching test2 Start #######################

***********(scenario-1)***********
su - root
cd /opt/app/crs/11.2.0.3/OPatch
./opatch auto /export/home/oracle/patch/ -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local
exit
--verify logs for any failures

***********(scenario-2)***********
srvctl stop instance -d test -i test2
su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl stop crs
exit root

-- grid patch to grid home - 17592127 is grid patch id
export GRID_HOME=/opt/app/crs/11.2.0.3
export ORACLE_HOME=$GRID_HOME
/opt/app/crs/11.2.0.3/OPatch/opatch napply -id 17592127 -oh /opt/app/crs/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

--grid patch to db home
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
cd /export/home/oracle/patch/17592127/custom/server/
/opt/app/crs/11.2.0.3/OPatch/opatch napply -id 17592127 -oh /opt/app/oracle/product/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

--database patch to database home - 19121548 is db PSU patch id
/opt/app/oracle/product/11.2.0.3/OPatch/opatch napply -id 19121548 -oh /opt/app/oracle/product/11.2.0.3 -ocmrf /opt/app/crs/11.2.0.3/OPatch/ocm/bin/ocm.rsp -local

cd /opt/app/oracle/product/11.2.0.3/OPatch

--check grid patch updates
export ORACLE_HOME=/opt/app/crs/11.2.0.3
./opatch lsinventory

--check Database patch updates
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
./opatch lsinventory

su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl start crs
--after 5 mins check crs status, all 4 services should be online
crsctl check crs
crsctl stat res -t
exit root

srvctl start instance -d test -i test2

######################## GRID+DB Patching test2 Stop ##########################

-- After all nodes are done, run this post-installation script on only 1 instance
sqlplus "/ as sysdba"
@/opt/app/oracle/product/11.2.0.3/rdbms/admin/catbundle.sql psu apply
exit

-- After the post-installation script is done, perform the steps below for JVM patching

test1:
-------
cd /export/home/oracle/patch
unzip 19282015 patch
srvctl stop instance -d test -i test1
su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl stop crs
exit

test2:
-------
cd /export/home/oracle/patch
unzip 19282015 patch
srvctl stop instance -d test -i test2
su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl stop crs
exit

test1:
-------
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
cd /export/home/oracle/patch/19282015
$ORACLE_HOME/OPatch/opatch apply -local
-- verify the patch installation in lsinventory
$ORACLE_HOME/OPatch/opatch lsinventory

test2:
-------
export ORACLE_HOME=/opt/app/oracle/product/11.2.0.3
cd /export/home/oracle/patch/19282015
$ORACLE_HOME/OPatch/opatch apply -local
-- verify the patch installation in lsinventory
$ORACLE_HOME/OPatch/opatch lsinventory

--After the JVM patch is applied on all nodes, bring up both instances

test1:
-------
su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl start crs
--after 5 mins check crs status, all 4 services should be online
crsctl check crs
crsctl stat res -t
exit root

srvctl start instance -d test -i test1


test2:
-------
su - root
cd /opt/app/crs/11.2.0.3/bin
which crsctl
crsctl start crs
--after 5 mins check crs status, all 4 services should be online
crsctl check crs
crsctl stat res -t
exit root

srvctl start instance -d test -i test2

--After bringing up all nodes, run the SQL scripts below on only 1 node.

cd /export/home/oracle/patch/19282015
sqlplus "/ as sysdba"
spool postinstall_log.log
@postinstall.sql
spool off;
spool utlrp_log.log
@/opt/app/oracle/product/11.2.0.3/rdbms/admin/utlrp.sql
spool off;
exit


Note: opatch may sometimes throw an error when -local is specified. If that is the case, remove the flag and rerun.


*******************************************************************************

Another, easier way to patch both the grid and DB homes is opatch auto:

OPatch in the grid home owned by oracle:dba

OPatch in the DB home owned by oracle:dba

Patch folder unzipped by oracle

opatch auto to be run by root

ocm.rsp file to be generated by the oracle user in both the grid and DB homes (see the sketch below)
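
The response file can be generated non-interactively; a sketch, with the grid home path mirroring the commands below:

cd /opt/app/crs/11.2.0.4/OPatch/ocm/bin
./emocmrsp -no_banner -output ocm.rsp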

./opatch auto /export/home/oracle/oracle_software/25476126 -oh /opt/app/crs/11.2.0.4 -ocmrf /opt/app/crs/11.2.0.4/OPatch/ocm/bin/ocm.rsp

./opatch auto /export/home/oracle/oracle_software/25476126 -oh /opt/app/oracle/product/11.2.0/11.2.0.4/ -ocmrf /opt/app/oracle/product/11.2.0/11.2.0.4/OPatch/ocm/bin/ocm.rsp

************
If response file creation fails with a Motif error, install the package below (Solaris):

pkg install pkg://solaris/library/motif

