PSU: 2-node RAC Patching 11.2.0.3 (no Standby DR)
set up ssh key:
oracle@ns9010
eval `ssh-agent`; ssh-add ~/.ssh/cps_dis
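## optional: confirm the key loaded into the agent (standard ssh-add usage, not part of the original steps)
## ssh-add -l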
take a hot backup:
oracle@ns9010
echo c111pvq | dbash -f /dba/common/oracle/audits/takeasnap -t 240 -w 10
shutdown:
oracle@ns9010
echo c111pvq c111sbr | dbash -c '/dba/bin/maint_mode.sh -e -r "CR0626051 PSU5 patching"' -t 240 -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_stop_dbs -t 240 -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/dist_abort_check -t 240 -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/verify_db_stopped -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_stop_nodeapps_11203 -w 10 -t 60
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/verify_nodeapps_stopped -w 10 -t 60
## disable the databases so they don't start when clusterware is restarted
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_disable_dbs -w 10 -t 60
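## for reference only: the dscript above disables each database in clusterware; the manual equivalent per database would be the standard srvctl call below (<dbname> is a placeholder)
## srvctl disable database -d <dbname>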
## Check for remaining Oracle processes
## NOTE: clusterware should still be up for RAC databases
echo c111pvq c111sbr | dbash -c 'ps -ef | grep oracle'
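## a simple extra check that no database instances remain while clusterware stays up (generic, assumes standard ora_pmon background process names)
## echo c111pvq c111sbr | dbash -c 'ps -ef | grep ora_pmon | grep -v grep'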
## upgrade OPatch version if less than 11.2.0.3.3
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/dg_patching/upgrade_opatch.11203
## Validate Oracle Inventories
echo c111pvq c111sbr | dbash -f /dba/common/oracle/audits/check_lsinventory_prepsu5
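## to spot-check a single node's inventory by hand (standard OPatch usage; the audit script above remains the authoritative check)
## $ORACLE_HOME/OPatch/opatch lsinventory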
## stop DB Console if it is running
echo c111pvq c111sbr | dbash -f /dba/common/oracle/audits/stop_dbconsole
## For RAC databases, the PSU must be applied from an empty directory located on a shared filesystem
## The shared filesystem must be accessible from all cluster nodes and must have at least 2G free
## Preferred options are /n01/oraflash1 or /n01/oradata1
## Run the following script to identify a candidate NAS filesystem for applying the PSU
echo c111pvq | dbash -f /dba/common/oracle/audits/psu5_patching_find_a_nas_mount
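## to manually confirm a candidate mount has the required 2G free, a plain df works (using /n01/oradata1 as an example)
## df -h /n01/oradata1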
patch:
## Patch 14727347 (11.2.0.3.5 Grid Infrastructure + Database Patch Set Update)
## README instructions are located at:
## https://theshare.thomsonreuters.com/sites/DBA/CPSDBA/Project%20Documentation/Projects%202013/11.2.0.3%20PSU%20patching/14727347%20GI+DB%20PSU5%20README.html
## Apply patch 14727347 (11.2.0.3.5 Grid Infrastructure + Database Patch Set Update) on all cluster nodes, one node at a time
oracle@c111pvq
eval `ssh-agent`
ssh-add
## check ssh is working
ssh c111pvq "date; /bin/hostname"
ssh c111sbr "date; /bin/hostname"
cd $ORACLE_HOME/OPatch
## run CR0626051_c111pvq_patch_rollback.sh to roll back conflicting patches
CR0626051_c111pvq_patch_rollback.sh
cd $ORACLE_HOME/OPatch
## run CR0626051_c111pvq_copy_unzip.sh to copy and unzip one-off patches
CR0626051_c111pvq_copy_unzip.sh
## Next copy patch 14727347 to an empty directory on the NAS mount identified earlier
## Example Steps using /n01/oradata1:
## cd /n01/oradata1 && mkdir psu5patching && cd psu5patching
cp /tools/dba/oracle/linux_64/11203_patches/psu5/p14727347_112030_Linux-x86-64.zip .
unzip p14727347_112030_Linux-x86-64.zip
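## sanity check (optional): the bundle should unzip into the GI and DB patch directories removed in the cleanup step later in this runbook
## ls -d 14727310 15876003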
oracle@c111pvq
## cd to the NAS location where patch 14727347 was unzipped and run the 'opatch auto' command as root
## NOTE: the 'opatch auto' command will take approx 20-30 minutes to complete; you can tail the log file to track progress
## Example Steps using /n01/oradata1:
## cd /n01/oradata1/psu5patching
su root (enter root password)
opatch auto . -ocmrf /u01/app/oracle/product/11.2.0.3/db/OPatch/ocm.rsp
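## to watch progress, tail the newest OPatch log; this assumes the default OPatch log location under $ORACLE_HOME/cfgtoollogs/opatch
## tail -f $(ls -t $ORACLE_HOME/cfgtoollogs/opatch/opatch*.log | head -1)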
oracle@c111sbr
## cd to the NAS location where patch 14727347 was unzipped and run the 'opatch auto' command as root
## NOTE: the 'opatch auto' command will take approx 20-30 minutes to complete; you can tail the log file to track progress
## Example Steps using /n01/oradata1:
## cd /n01/oradata1/psu5patching
su root (enter root password)
opatch auto . -ocmrf /u01/app/oracle/product/11.2.0.3/db/OPatch/ocm.rsp
oracle@c111pvq
## Next run CR0626051_c111pvq_patch_apply.sh to apply remaining one-off patches
cd $ORACLE_HOME/OPatch
CR0626051_c111pvq_patch_apply.sh
## then run post-patch steps
## run on node 1 only; if there are multiple databases, repeat the following steps FOR EACH DATABASE ON THIS HOST:
cd $ORACLE_HOME/rdbms/admin
sqlplus /nolog
SQL> CONNECT / AS SYSDBA
SQL> STARTUP
SQL> @catbundle.sql psu apply
SQL> shutdown immediate
SQL> QUIT
## Check the log files in /dba/common/u01/app/oracle/product/11.1.0/client/cfgtoollogs/catbundle for any errors
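## a quick scan of those logs for errors (plain grep over the same path)
## grep -i "ORA-" /dba/common/u01/app/oracle/product/11.1.0/client/cfgtoollogs/catbundle/*.log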
## Next clean up the NAS patch location once patching is successfully completed
## NOTE: be very careful to remove ONLY THE PATCHING DIRECTORY and nothing else
## Example Steps using /n01/oradata1:
## cd /n01/oradata1/psu5patching
ls (confirm dir contains only patching files)
rm *
rm -rf 15876003
rm -rf 14727310
cd ..
rmdir psu5patching
post-patch:
oracle@ns9010
## make sure all patches are applied
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/dg_patching/verify_11203_lion.patches.psu5
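## to spot-check by hand on either node, grep the inventory for the bundle patch number (standard OPatch usage)
## . ~/.bash_profile; opatch lsinventory | grep 14727347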
echo c111pvq c111sbr | dbash -c '. ~/.bash_profile; opatch util cleanup -silent'
## Validate Oracle Inventories
echo c111pvq c111sbr | dbash -f /dba/common/oracle/audits/check_lsinventory_postpsu5
startup:
## re-enable the databases
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_enable_dbs -w 10 -t 60
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_start_nodeapps_11203 -w 10 -t 60
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/verify_nodeapps_started_mpb -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/distributed_start_dbs -t 240 -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/verify_db_started -w 10
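## once the databases are back up, the catbundle PSU apply can also be confirmed per database with a standard query against dba_registry_history (illustrative, not part of the original checklist)
## sqlplus / as sysdba
## SQL> SELECT action_time, action, bundle_series, comments FROM dba_registry_history ORDER BY action_time;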
## load packages for patch 14742362
echo c111pvq | dbash -f '/dba/common/oracle/audits/postpatch14742362' -t 240 -w 10
## start DB Console back up if it is configured
echo c111pvq c111sbr | dbash -f /dba/common/oracle/audits/start_dbconsole
echo c111pvq c111sbr | dbash -c '/dba/bin/maint_mode.sh -d' -t 240 -w 10
echo c111pvq c111sbr | dbash -f /dba/common/oracle/dscripts/dba_checkout -t 240 -w 10