Monday, December 26, 2016

ASM diskgroup usage

To monitor ASM Diskgroup Usage

-- Reports per-diskgroup capacity and usage from V$ASM_DISKGROUP,
-- with a grand total of total/used MB at the end of the report.
SET LINESIZE  145
SET PAGESIZE  9999
SET VERIFY    off
COLUMN group_name             FORMAT a20           HEAD 'Disk Group|Name'
COLUMN sector_size            FORMAT 99,999        HEAD 'Sector|Size'
COLUMN block_size             FORMAT 99,999        HEAD 'Block|Size'
COLUMN allocation_unit_size   FORMAT 999,999,999   HEAD 'Allocation|Unit Size'
COLUMN state                  FORMAT a11           HEAD 'State'
COLUMN type                   FORMAT a6            HEAD 'Type'
COLUMN total_mb               FORMAT 999,999,999   HEAD 'Total Size (MB)'
COLUMN used_mb                FORMAT 999,999,999   HEAD 'Used Size (MB)'
COLUMN pct_used               FORMAT 999.99        HEAD 'Pct. Used'

-- BUG FIX: the BREAK column must match the SELECT alias (group_name).
-- The original said "disk_group_name", which does not exist in the
-- query, so the per-group break/skip silently never fired.
break on report on group_name skip 1
compute sum label "Grand Total: " of total_mb used_mb on report

SELECT
    name                                     group_name
  , sector_size                              sector_size
  , block_size                               block_size
  , allocation_unit_size                     allocation_unit_size
  , state                                    state
  , type                                     type
  , total_mb                                 total_mb
  , (total_mb - free_mb)                     used_mb
    -- percent used = (1 - free/total) * 100, rounded to 2 decimals
  , ROUND((1- (free_mb / total_mb))*100, 2)  pct_used
FROM
    v$asm_diskgroup
ORDER BY
    name
/

Credits: Gavin Soorma



Monitoring using OEM

-- Pulls raw "Usable Free" metric samples for one ASM diskgroup from the
-- OEM repository (mgmt_targets -> mgmt_metrics -> mgmt_metrics_raw),
-- for the last half hour (sysdate - 0.5/24).
-- NOTE(review): 7.57 appears to be the diskgroup's total size in TB,
-- hard-coded — verify against the actual diskgroup before reuse.
-- NOTE(review): the column labelled "Used Space" is free/total*100,
-- i.e. percent *free*, not percent used — confirm the intended label.
-- "Free TB" assumes the raw metric value is in MB (value/1024/1024).
select collection_timestamp ,key_value DISK_GROUP,round(value/1024/1024,2) "Free TB",round((round(value/1024/1024,2)/7.57*100),2) "Used Space"
from
-- target row for the ASM cluster being monitored
(select distinct target_name,target_type,target_guid from mgmt_targets where target_type='osm_cluster' and target_name='+ASM_xxxipcva-clus')
join (
 select distinct
  target_type,metric_name,metric_label,metric_column,column_label,short_name,metric_guid
 from mgmt_metrics
) using(target_type)
-- raw samples keyed by target + metric
join mgmt_metrics_raw using(target_guid,metric_guid)
where key_value = 'RECO_DG' and collection_timestamp >= sysdate-0.5/24 and metric_label like '%Usage' and column_label like '%Usable Free%' --and ROWNUM <= 5
order by collection_timestamp desc ;

Wednesday, December 21, 2016

Disable logging on Oracle objects and database

ALTER TABLE tablename MODIFY PARTITION 2016dec NOLOGGING;

Alter table table_name nologging;

select table_name, logging  from dba_tables where table_name like 'test%';

select partition_name,logging from DBA_TAB_PARTITIONS where table_name='tablename';


ALTER TABLE tablename MODIFY LOB (USER_DATA) (NOCACHE NOLOGGING);


To enable force logging on db

ALTER DATABASE force logging;

ALTER TABLESPACE users FORCE LOGGING;

To disable:

ALTER DATABASE no force logging;

ALTER TABLESPACE users NO FORCE LOGGING;

Monday, December 19, 2016

Configuring DBFS on Exadata

Requirements:

Fuse, Fuse libs, Kernel Devel package, Repository database with big file tablespace

Install Fuse, Fuse libs and Kernel Devel package

Run all the steps below until directory creation on both the database nodes.

# cd /etc/yum.repos.d

Verify that the public repository file is present. If it is not, fetch it with wget; otherwise, whenever you try to run the install command it will say there is nothing to do.

# /usr/openv/pdde/pdopensource/bin/wget http://public-yum.oracle.com/public-yum-ol6.repo

After the above is added run the below to install fuse and kernel-devel

# yum install fuse fuse-libs kernel-devel

Once installed you will get

Setting up Install Process
Package fuse-2.9.4-1.0.1.el6.x86_64 already installed and latest version
Package fuse-libs-2.9.4-1.0.1.el6.x86_64 already installed and latest version
Package kernel-devel-2.6.32-642.11.1.el6.x86_64 already installed and latest version
Nothing to do


Optional - Dos2Unix

# sudo yum install dos2unix

Create the mount directory

# mkdir /dbfs_mnt
# chown oracle:dba /dbfs_mnt

*****************************************************************

Create Repository Database testdbfs

Create tablespace and dbfs_user

-- Bigfile tablespace backing the DBFS filesystem (1 TB, no autoextend).
CREATE BIGFILE TABLESPACE DBFS_DATA DATAFILE '+DATA_DG' SIZE 1T AUTOEXTEND OFF NOLOGGING
ONLINE
EXTENT MANAGEMENT LOCAL AUTOALLOCATE
BLOCKSIZE 8K
SEGMENT SPACE MANAGEMENT AUTO
FLASHBACK ON;

-- Dedicated owner for the DBFS repository objects.
CREATE USER DBFS_USER IDENTIFIED BY "Password" DEFAULT TABLESPACE DBFS_DATA TEMPORARY TABLESPACE TEMP;

-- BUG FIX: grant to the user actually created above (DBFS_USER).
-- The original granted to "dbfs", a user that was never created,
-- which fails with ORA-01917: user or role 'DBFS' does not exist.
grant create session, resource, create view, dbfs_role to dbfs_user;

$ cd $ORACLE_HOME/rdbms/admin
$ sqlplus dbfs_user/dbfs_user

-- Here dbfs_Data is tablespace name and oracle is the folder name that will be created inside dbfs_mnt directory

@$ORACLE_HOME/rdbms/admin/dbfs_create_filesystem dbfs_data oracle


*****************************************************************
Run on Both nodes

Configure Shared Libraries
# echo "/usr/local/lib" >> /etc/ld.so.conf.d/usr_local_lib.conf
# cd /usr/local/lib
# ln -s $ORACLE_HOME/lib/libclntsh.so.11.1
# ln -s $ORACLE_HOME/lib/libnnz11.so
# locate libfuse.so
# ln -s /lib64/libfuse.so.2 libfuse.so
# ldconfig
# ldconfig -p | grep fuse
# ln -s $ORACLE_HOME/bin/dbfs_client /sbin/mount.dbfs

# echo user_allow_other > /etc/fuse.conf
# chmod 644 /etc/fuse.conf

*****************************************************************

Download and modify mount-dbfs.zip from oracle website (Configuring DBFS on Oracle Exadata Database Machine (Doc ID 1054431.1)

Copy this zip to tmp folder on one of the server

# unzip mount-dbfs-20160215.zip
Archive:  mount-dbfs-20160215.zip
  inflating: mount-dbfs.conf
  inflating: mount-dbfs.sh
# dos2unix mount-dbfs.conf
dos2unix: converting file mount-dbfs.conf to UNIX format ...
# dos2unix mount-dbfs.sh
dos2unix: converting file mount-dbfs.sh to UNIX format ...

You will have two files

1)mount-dbfs.sh
2)mount-dbfs.conf

We need to modify following values in conf file and leave the others as-is

DBNAME=testdbfs

MOUNT_POINT=/dbfs_mnt

DBFS_USER=dbfs_user

ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/dbhome_1

GRID_HOME=/u01/app/11.2.0.4/grid

DBFS_PASSWD=Password

Once modified we need to copy these files to their respective locations on both nodes

Using the dcli command here copies the files to both nodes, so you don't need to repeat the same steps on each node.

Create a group called dbs_group in root home directory and include the 2 nodes
# cat dbs_group
10.111.11.111
10.111.11.222

# dcli -g ~/dbs_group -l root -d /u01/app/11.2.0.4/grid/crs/script/ -f /tmp/mount-dbfs.sh
# dcli -g ~/dbs_group -l root chown oracle:dba /u01/app/11.2.0.4/grid/crs/script/mount-dbfs.sh
# dcli -g ~/dbs_group -l root chmod 750 /u01/app/11.2.0.4/grid/crs/script/mount-dbfs.sh
# dcli -g ~/dbs_group -l root -d /etc/oracle -f /tmp/mount-dbfs.conf
# dcli -g ~/dbs_group -l root chmod 640 /etc/oracle/mount-dbfs.conf

Once copied verify all the locations and check permissions.

Now test the functionality to see if mounts are starting properly
# FIX: the dcli copy above placed mount-dbfs.sh in crs/script/ (no "s");
# the original "crs/scripts/" path does not match and cd would fail.
$ cd $GRID_HOME/crs/script/
$ ./mount-dbfs.sh start
$ ./mount-dbfs.sh status
$ ./mount-dbfs.sh stop

When you start you can verify if the mount is up by checking df -kh . This should list the new mount
Once verified now its time to add to cluster
*****************************************************************
Create a shell script on one of the nodes and run it
$ vi add-dbfs-resource.sh
##### start script add-dbfs-resource.sh
#!/bin/bash
# Registers the DBFS mount script as a CRS local_resource so the
# filesystem is mounted/unmounted automatically with the cluster.
ACTION_SCRIPT=/u01/app/11.2.0.4/grid/crs/script/mount-dbfs.sh
RESNAME=dbfs_mnt
DBNAME=testdbfs
# CRS ora.* resource names are lowercase; normalize the db name.
DBNAMEL=`echo $DBNAME | tr A-Z a-z`
ORACLE_HOME=/u01/app/11.2.0.4/grid
PATH=$ORACLE_HOME/bin:$PATH
export PATH ORACLE_HOME
# hard/pullup dependencies tie the mount to the database resource:
# it starts after (and because of) the db, and stops with it.
crsctl add resource $RESNAME \
  -type local_resource \
  -attr "ACTION_SCRIPT=$ACTION_SCRIPT, \
         CHECK_INTERVAL=30,RESTART_ATTEMPTS=10, \
         START_DEPENDENCIES='hard(ora.$DBNAMEL.db)pullup(ora.$DBNAMEL.db)',\
         STOP_DEPENDENCIES='hard(ora.$DBNAMEL.db)',\
         SCRIPT_TIMEOUT=300"
##### end script add-dbfs-resource.sh

Run it as Oracle user

$ add-dbfs-resource.sh

Now verify using to see if the mount shows up in resources

crsctl status res -t

To start the mounts

crsctl start resource dbfs_mnt

To stop the mounts

crsctl stop resource dbfs_mnt -f

*****************************************************************

Note: When stopping make sure no session is open and pointing to the folder. If you are inside the folder and try to unmount you get error saying Busy.

If you try to stop the database while the mounts are online you will get an error because of the dependencies. To shut it down we need to specify the force option

After everything is mounted the filesystem permissions will be changed back to root:root

srvctl start database -d testdbfs -f

You can also unmount using following

fusermount -u /dbfs_mnt

To force

umount -l  /dbfs_mnt
fusermount -uz /dbfs_mnt

*****************************************************************
To drop filesystem

@dbfs_drop_filesystem.sql oracle

*****************************************************************
Testing mounts if working properly

$ # Connection prompts for password and holds session.
$ dbfs_client dbfs_user@testdbfs /dbfs_mnt

$ # Connection retrieves password from file and releases session.
echo Password > pw.f
$ nohup dbfs_client dbfs_user@testdbfs /dbfs_mnt  < pw.f &
*****************************************************************

For debugging

Please refer to
http://www.hhutzler.de/blog/debugging-prolbems-when-mounting-a-dbfs/

$ means oracle user
# means root user




Friday, December 16, 2016

Run SQL statements in background UNIX

To run sql statements in background

Copy all the sql statements to a file name test.sql on the unix server

Then run the following using nohup

nohup sqlplus / as sysdba @test.sql &

or

nohup sqlplus USERNAME/password@DBNAME @test.sql &


view output in nohup.out

Tuesday, November 15, 2016

Converting physical standby database to snapshot standby

Converting physical standby database to snapshot standby

All the following steps to be executed on standby database only..


Set the size for recovery area.

Alter system set db_recovery_file_dest_size=100G;

Set Flash recovery area.

Alter system set db_recovery_file_dest='+RECO_DG'

Stop managed recovery if it is active.
 
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

Bring the physical standby database to mount stage.

Startup mount;

Convert physical standby database to snapshot standby database.

ALTER DATABASE CONVERT TO SNAPSHOT STANDBY;

Shutdown Immediate;

Startup;

select open_mode,database_role from v$database;

OPEN_MODE  DATABASE_ROLE
---------- ----------------
READ WRITE SNAPSHOT STANDBY


Converting Snapshot standby database to physical standby


Shutdown Immediate;

Startup mount;

ALTER DATABASE CONVERT TO PHYSICAL STANDBY;

Shutdown immediate;

Startup mount;

select open_mode,database_role from v$database;

OPEN_MODE  DATABASE_ROLE
---------- ----------------
MOUNTED    PHYSICAL STANDBY

ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;


Note: During this time archivelogs are getting shipped to standby from primary


Convert Physical standby database to active standby for testing

To convert physical standby database to primary for testing scenarios


Step 1 - In Standby database

Set up a flash recovery area.

If Flash Recovery Area ( FRA ) is not configured in the standby then enable it and make sure to give enough space for to FRA

ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=500G;
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST='+RECO_DG';

Cancel Redo Apply and create a guaranteed restore point.

ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
CREATE RESTORE POINT testing GUARANTEE FLASHBACK DATABASE;

To Confirim the details of restore point and its scn and time stamp run

 select NAME,SCN,TIME from v$restore_point;

NAME                                                               SCN                    TIME
--------------------------------------------------     -------------    ------------------------------
TESTING     2254478210    11-JUN-16 01.10.21.000000000 PM


Step 2 - In Primary Database

ALTER SYSTEM ARCHIVE LOG CURRENT;

Optional - Defer log archive destinations pointing to the standby that will be activated.

--ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=DEFER;

Step 3 - In Standby database

Activate the physical standby database:

ALTER DATABASE ACTIVATE STANDBY DATABASE;

Once its done you can check the controlfile status will be changed from Standby to Current

select CONTROLFILE_TYPE from v$database;

CONTROL
-------
CURRENT


Then open the database.

ALTER DATABASE SET STANDBY DATABASE TO MAXIMIZE PERFORMANCE;
ALTER DATABASE OPEN;

Revert the active standby database back to Physical standby database


Mount the database.
Flashback the database to restore point.

STARTUP MOUNT;

ORACLE instance started.
Total System Global Area  289406976 bytes
Fixed Size                  1290208 bytes
Variable Size             159383584 bytes
Database Buffers          125829120 bytes
Redo Buffers                2904064 bytes
Database mounted.

FLASHBACK DATABASE TO RESTORE POINT testing ;


You can confirm the same by checking the controlfile status. It will be now backup controlfile

select controlfile_type from v$database;

CONTROL
--------------
BACKUP

Convert to Standby database

ALTER DATABASE CONVERT TO PHYSICAL STANDBY;
STARTUP MOUNT;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT;
select controlfile_type from v$database;

CONTROL
--------------
STANDBY


In Primary database

Optional(If deferred before) - Re-enable archiving to the physical standby database:

ALTER SYSTEM ARCHIVE LOG CURRENT;
--ALTER SYSTEM SET LOG_ARCHIVE_DEST_STATE_2=ENABLE;

In Standby database

Drop the restore point

STARTUP FORCE MOUNT;
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT;
DROP RESTORE POINT testing ;


Note: Archivelogs will not be shipped when the db is in read/write mode from primary. So make sure you are not deleting the archivelogs in primary until they are shipped here. If you have space constraints or archive logs are purged by a job, then flash back to restore point and do a incremental backup on primary and recover using it based on SCN.

Also during this time flashback_on parameter of database is set to NO

Wednesday, November 9, 2016

Migrating Single node Instace to two node RAC using Data Guard

Goal is to migrate single instance RAC DB to 2 Node RAC on a new server

This is to perform switch over with minimum downtime

PRIMARY(old server)
db_unique_name=test
db_name=test
instance_name=test1


Standby(new server)
db_unique_name=testdr
db_name=test
instance_name=test1,test2

Scan names, Vip names are different as this is not a server refresh

All the files are managed by oracle

datafile are stored in DATA_DG
Redologs are mirrored in DATA_DG and RECO_DG
Archive logs and backups are stored in RECO_DG


Folder structure for example (Automatically generated by oracle. No particular location specified)

primary
+DATA_DG/test/xxx.dbf      -- Just for reference

STANDBY
+DATA_DG/testdr/xxx.dbf   -- Just for reference



Run below at primary database

alter database force logging;

-- Give the primary name followed by primary unique name
alter system set LOG_ARCHIVE_CONFIG='DG_CONFIG=(test,testDR)' scope=both sid='*';

-- Log_archive_dest1 is set for the current database
alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=test' scope=both sid='*';

-- Log archive_dest_2 is set for standby database. This is where log shipping will be done
alter system set LOG_ARCHIVE_DEST_2='SERVICE=testDR ARCH VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=testDR' scope=both sid='*';


alter system set log_archive_max_processes=8 scope=both sid='*';

-- Server will always be the other database
alter system set fal_server=testDR scope=both sid='*';

-- Set to auto because we want all the file management to be exactly same as primary and will be managed by oracle
alter system set standby_file_management=auto scope=both sid='*';

-- Commented the below lines because i am not creating standby redo logs as i am using dataguard just to do a cut over inorder to minimize downtime. We will not be using this as a DR

-- Standby redologs are used in maximum protection and maximum security modes of dataguard. We are using maximum performance so i skipped it

-- If you have requirement to create standby redo logs change file management to Manual. Then create logs and change management to Auto.

-- I am not using db_file_name_convert and log_file_name_convert because i am not creating folders manually. These are being created by oracle when performing duplicate



--alter system set standby_file_management=manual scope=both sid='*';
--alter system set db_file_name_convert='TESTDR','TEST' scope=spfile sid='*';
--alter system set log_file_name_convert='TESTDR','TEST' scope=spfile sid='*';

--mkdir +RECO_DG/TEST/STANDBYLOG

--alter database add standby logfile thread 1 group 4 '+RECO_DG/TEST/STANDBYLOG/standby_group_04.log' size 52M;
--alter database add standby logfile thread 1 group 5 '+RECO_DG/TEST/STANDBYLOG/standby_group_05.log' size 52M;
--alter database add standby logfile thread 1 group 6 '+RECO_DG/TEST/STANDBYLOG/standby_group_06.log' size 52M;

alter system set standby_file_management=auto scope=both sid='*';

create pfile='pfile_for_standby.txt' from spfile;

Copy pfile to the new server

scp $ORACLE_HOME/dbs/pfile_for_standby.txt exadb01:$ORACLE_HOME/dbs/pfile_for_standby.txt

Copy the password file from primary server to standby 2 nodes.

scp $ORACLE_HOME/dbs/orapwtest exadb01:$ORACLE_HOME/dbs/orapwtest1
scp $ORACLE_HOME/dbs/orapwtest exadb02:$ORACLE_HOME/dbs/orapwtest2

*********************************************************************************

Perform Below at STANDBY

Node1
Create a listener using db home. This one is used just for connecting to db as auxiliary during rman duplicate. This is because some times we get service blocked in listener status  if we use standard listener

Just create on one node. This can be stopped or removed after duplication is done.

LISTENER_TEMP=
      (DESCRIPTION=
       (ADDRESS_LIST=
        (ADDRESS= (PROTOCOL=TCP)(HOST=exadb01)(PORT=1525))
      )
     )

SID_LIST_LISTENER_TEMP =
    (SID_LIST=
     (SID_DESC=
     (SID_NAME=test1)
     (ORACLE_HOME=/u01/app/oracle/product/11.2.0.4/dbhome_1)
     )
    )

Once the listener is created start it and perform the below at each standby node


Node 1
mkdir -p /u01/app/oracle/admin/test/adump
mkdir -p /u01/app/oracle/diag/rdbms/testdr/test1
cd /u01/app/oracle/diag/rdbms/testdr/test1
mkdir trace cdump


Node 2
mkdir -p /u01/app/oracle/admin/test/adump
mkdir -p /u01/app/oracle/diag/rdbms/testdr/test2
cd /u01/app/oracle/diag/rdbms/testdr/test2
mkdir trace cdump


Edit pfile node1 at standby and change the following

*.audit_file_dest='/u01/app/oracle/admin/test/adump'

-- Just give diskgroups name here because rman duplicate will create the files automatically in the disk groups. After everything is done you need to modify this to reflect new files.
*.control_files='+DATA_DG','+RECO_DG'


*.db_unique_name='testdr'

-- Server will always be the other database
*.fal_server='test'

--Log_archive_dest1 is set for the current database
*.log_archive_dest_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=testdr'

--Log_archive_dest2 is set for the standby database
*.log_archive_dest_2='SERVICE=test ARCH VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=test'

*.remote_listener='exadb-scan:1521'

-- I am not using db_file_name_convert and log_file_name_convert because i am not creating folders manually. These are being created by oracle when performing duplicate
--*.log_file_name_convert='test','testdr'
--*.db_file_name_convert='test','testdr'

Modify tns and add testdr entries and test entries.. these entries should be in all tns files both in primary and standby



test =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = proddb-scan)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = test)
    )
  )

testDR =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = exadb01.hke.local)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = testdr)
 (SID = test1)
    )
  )

testDR1 =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = exadb01.hke.local)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = testdr)
      (SID = test1)
    )
  )

testDR2 =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = exadb02.hke.local)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = testdr)
      (SID = test2)
    )
  )

TEMP =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = exadb01.hke.local)(PORT = 1525))
    (CONNECT_DATA =
      (SID = test1)
    )
  )


On standby run the following

Note: Set proper environment settings before you do the below. instance names are case sensitive. Export the db with instance number before starting the db.

Also remove any parameters related to interconnect

sql > startup nomount pfile='/u01/app/oracle/product/11.2.0.4/dbhome_1/dbs/pfile_for_standby.txt';


On primary run the following


rman target sys/welcome1@test auxiliary sys/welcome1@TEMP

duplicate target database for standby from active database dorecover;

I am using active duplicate here. Using this we can skip the tedious process of taking backup of primary and copying it to standby and doing duplicate.

Using active duplicate directly copies everything from current running database to the new one.

*****
This might have impact on your network. So please check before performing this.

***** 
I am migrating these db's from exadata x4 to exadata x6. And the network link is faster. So i was able to complete duplication of a 7TB database very quickly.

Once this is done and database is in up state. The spfile will be completely messed up because while performing active clone it creates an spfile from memory.

So for future to keep the parameter file clean use the old pfile txt that we copied the beginning

On standby perform the below


Shutdown immediate;

Now edit the controlfile parameter to point to the newly created controlfiles because the parameterfile that we currently have doesnt contain exact controlfile location.

*.control_files='+DATA_DG/testdr/controlfile/current.411.926159161','+RECO_DG/testdr/controlfile/current.5242.926159161'

add the following parameters as well as copy the instance 1 parameters and rename it to instance 2

cluster_database=TRUE
test1.undo_tablespace='UNDOTBS1'
test2.undo_tablespace='UNDOTBS2'
test1.instance_number=1
test2.instance_number=2
test1.thread=1
test2.thread=2


On standby node 1



startup nomount pfile='/u01/app/oracle/product/11.2.0.4/dbhome_1/dbs/pfile_for_standby.txt';


create spfile='+data_dg/testdr/parameterfile/spfiletest.ora' from pfile='/u01/app/oracle/product/11.2.0.4/dbhome_1/dbs/pfile_for_standby.txt';

On standby node 1


vi inittest1.ora
spfile='+data_dg/testdr/parameterfile/spfiletest.ora'


On standby node 2


vi inittest2.ora
spfile='+data_dg/testdr/parameterfile/spfiletest.ora'


*********************************************************************************
Since we dont have 2nd node for this database in primary. I will need to create redologs for the 2nd thread

ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT=MANUAL;


alter database add logfile thread 2 group 16 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 17 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 18 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 19 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 20 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 21 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 22 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 23 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 24 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 25 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 26 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 27 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 28 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 29 ('+DATA_DG','+RECO_DG') size 4G reuse;
alter database add logfile thread 2 group 30 ('+DATA_DG','+RECO_DG') size 4G reuse;


ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT=AUTO;


*********************************************************************************

Adding the 2 instances to cluster



srvctl add database -d test -o /u01/app/oracle/product/11.2.0.4/dbhome_1 -p +data_dg/testdr/parameterfile/spfiletest.ora -r physical_standby -a DATA_DG,RECO_DG
srvctl add instance -d test -i test1 -n exadb01
srvctl add instance -d test -i test2 -n exadb02
srvctl start database -d test

srvctl status database -d test

srvctl config database -d test


To change service name

alter system set service_names=test scope=both sid='*';

alter system register;

Now shut down everything

srvctl stop database -d test

*********************************************************************************

On standby Node 1


Now just start 1st instance manually without srvctl to mount state.

sql > Startup Mount


ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;

To stop MRP

ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

*********************************************************************************

Test dataguard functionality

Standby

--select process,status,sequence#,thread# from v$managed_standby;

select thread#,max(sequence#) from v$archived_log where applied='YES' group by thread#;

PRIMARY

select thread#,max(sequence#) from v$archived_log group by thread#;

or

select max(sequence#) from v$archived_log where  resetlogs_change#=(SELECT resetlogs_change# FROM v$database)

or

-- Latest archived-log sequence for threads 1 and 2 (current incarnation
-- only, via the resetlogs_change# filter) at archive dest_id=1.
-- NOTE(review): the inner MAX(sequence#) is a single *global* maximum,
-- not a per-thread maximum, so the branch for a thread that does not
-- own the highest sequence returns no row — confirm this is intended.
SELECT sequence# || '|' ||
thread# || '|' ||
TO_CHAR(completion_time, 'DD-MON-RRRR HH24:MI:SS')
FROM v$archived_log
WHERE sequence#= (SELECT MAX(sequence#) FROM v$archived_log where resetlogs_change#=(SELECT resetlogs_change# FROM v$database))
AND thread#=1
AND resetlogs_change#=(SELECT resetlogs_change# FROM v$database)
AND dest_id=1
UNION ALL
SELECT sequence# || '|' ||
thread# || '|' ||
TO_CHAR(completion_time, 'DD-MON-RRRR HH24:MI:SS')
FROM v$archived_log
WHERE sequence#= (SELECT MAX(sequence#) FROM v$archived_log where resetlogs_change#=(SELECT resetlogs_change# FROM v$database))
AND resetlogs_change#=(SELECT resetlogs_change# FROM v$database)
AND thread#=2
AND dest_id=1;


*********************************************************************************


Note: If you no longer want to use standby db or you just did dataguard as part of migration make sure you disable force logging on current primary db

ALTER DATABASE FORCE LOGGING;
*********************************************************************************
Best practice
 alter system set log_archive_dest_2 = 'service=STDBY ARCH OPTIONAL MAX_CONNECTIONS=5 VALID_FOR=(ALL_LOGFILES,PRIMARY_ROLE) db_unique_name=STDBY' scope = both ;
 But I think you should use LGWR
 alter system set log_archive_dest_2 = 'service=STDBY LGWR ASYNC VALID_FOR=(ALL_LOGFILES,PRIMARY_ROLE) db_unique_name=STDBY' scope = both ;

For LGWR use standby redo logs

create 5 groups of SRL
alter database add standby logfile thread 1 group 10 (' full path file name ') size 104857600 ;
alter database add standby logfile thread 1 group 11 (' full path file name ') size 104857600 ;
alter database add standby logfile thread 1 group 12 (' full path file name ') size 104857600 ;
alter database add standby logfile thread 1 group 13 (' full path file name ') size 104857600 ;
alter database add standby logfile thread 1 group 14 (' full path file name ') size 104857600 ;

Then change the redo transport
alter system set log_archive_dest_2 = 'service=STDBY ASYNC VALID_FOR=(ALL_LOGFILES,PRIMARY_ROLE) db_unique_name=STDBY' scope = both ;

Make sure you have enough db_Cache size on stdby

Tuesday, September 20, 2016

RMAN restore scenarios

Full restore and recover Database

# Full database restore + complete recovery from tape
# (four parallel NetWorker/DataDomain SBT channels).
RUN {
     ALLOCATE CHANNEL CH1 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH2 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH3 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH4 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';

     # Restore all datafiles from the most recent backup.
     RESTORE DATABASE;

     # Apply incrementals/archived logs up to the latest available point.
     RECOVER DATABASE;

     RELEASE CHANNEL CH1;
     RELEASE CHANNEL CH2;
     RELEASE CHANNEL CH3;
     RELEASE CHANNEL CH4;
     ALTER DATABASE OPEN;

}

Restore only one tablespace

# Restore + recover a single tablespace while the rest of the database
# stays open: take it offline, restore/recover, bring it back online.
RUN {
     ALLOCATE CHANNEL CH1 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH2 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH3 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH4 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';

     # OFFLINE IMMEDIATE so the restore can overwrite the datafiles.
     SQL 'ALTER TABLESPACE TABLESPACE_NAME OFFLINE IMMEDIATE';
     RESTORE TABLESPACE TABLESPACE_NAME;

     RECOVER TABLESPACE TABLESPACE_NAME;

     SQL 'ALTER TABLESPACE TABLESPACE_NAME ONLINE';

     RELEASE CHANNEL CH1;
     RELEASE CHANNEL CH2;
     RELEASE CHANNEL CH3;
     RELEASE CHANNEL CH4;

}

Point in time recovery of full database

# Point-in-time restore/recovery of the whole database to a fixed
# timestamp. The same UNTIL TIME must be used for both RESTORE and
# RECOVER, as done here.
# NOTE(review): after incomplete (point-in-time) recovery the database
# must be opened with ALTER DATABASE OPEN RESETLOGS — this script leaves
# it unopened; open it manually afterwards.
RUN {
     ALLOCATE CHANNEL CH1 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH2 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH3 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';
     ALLOCATE CHANNEL CH4 TYPE 'SBT_TAPE' PARMS 'SBT_PARMS=(NSR_SERVER=datadomain.domain.local,NSR_CLIENT=host.domain.local)';

     RESTORE DATABASE UNTIL TIME = "TO_DATE('14/09/2016-13:23:42', 'DD/MM/YYYY-HH24:MI:SS')";

     RECOVER DATABASE UNTIL TIME = "TO_DATE('14/09/2016-13:23:42', 'DD/MM/YYYY-HH24:MI:SS')";

     RELEASE CHANNEL CH1;
     RELEASE CHANNEL CH2;
     RELEASE CHANNEL CH3;
     RELEASE CHANNEL CH4;

}
***************************************************************************
 Point in time recovery duplicate

At target

rman
connect  target sys/Password1@pdwh
connect  auxiliary /

run
{
set until time "to_date('Feb 02 2018 04:30:00','Mon DD YYYY HH24:MI:SS')";
duplicate
target database to xdwh;
}



Tablespace PITR

rman
connect  target sys/passwd1@phods
connect  auxiliary /


run
{
set until time "to_date('Aug 20 2018 16:00:00','Mon DD YYYY HH24:MI:SS')";
duplicate
target database to crit undo tablespace 'UNDOTBS1' tablespace 'HMA_ESB_DATA','HMA_ESB_INDEX';
}

Tuesday, September 13, 2016

Recovery manager (RMAN) Sample backup scripts for oracle

Rman setting for non-asm

RMAN> show all;
using target database control file instead of recovery catalog
RMAN configuration parameters for database with db_unique_name dbname are:
CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 7 DAYS;
CONFIGURE BACKUP OPTIMIZATION ON;
CONFIGURE DEFAULT DEVICE TYPE TO DISK; # default
CONFIGURE CONTROLFILE AUTOBACKUP ON;
CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '/loc/host/dbname/rman/ctrl_bkp/dbname_ctl_auto_%F';
CONFIGURE DEVICE TYPE DISK PARALLELISM 1 BACKUP TYPE TO BACKUPSET; # default
CONFIGURE DATAFILE BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE ARCHIVELOG BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE CHANNEL DEVICE TYPE DISK MAXPIECESIZE 5 G;
CONFIGURE MAXSETSIZE TO UNLIMITED; # default
CONFIGURE ENCRYPTION FOR DATABASE OFF; # default
CONFIGURE ENCRYPTION ALGORITHM 'AES128'; # default
CONFIGURE COMPRESSION ALGORITHM 'BASIC' AS OF RELEASE 'DEFAULT' OPTIMIZE FOR LOAD TRUE ; # default
CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; # default
CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/loc/dbs/snapcf_dbname.f';

Rman backup script non-asm (Full and Incremental)

https://gist.github.com/sravanrox/d2ac7544c5757b8a0cf054ca844ce040

Rman setting for asm

RMAN> show all;

using target database control file instead of recovery catalog
RMAN configuration parameters for database with db_unique_name dbname are:
CONFIGURE RETENTION POLICY TO RECOVERY WINDOW OF 1 DAYS;
CONFIGURE BACKUP OPTIMIZATION ON;
CONFIGURE DEFAULT DEVICE TYPE TO DISK; # default
CONFIGURE CONTROLFILE AUTOBACKUP ON;
CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '+RECO_DG';
CONFIGURE DEVICE TYPE DISK PARALLELISM 8 BACKUP TYPE TO BACKUPSET;
CONFIGURE DATAFILE BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE ARCHIVELOG BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT   '+RECO_DG';
CONFIGURE MAXSETSIZE TO UNLIMITED; # default
CONFIGURE ENCRYPTION FOR DATABASE OFF; # default
CONFIGURE ENCRYPTION ALGORITHM 'AES128'; # default
CONFIGURE COMPRESSION ALGORITHM 'BASIC' AS OF RELEASE 'DEFAULT' OPTIMIZE FOR LOAD TRUE ; # default
CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; # default
CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/u01/app/oracle/product/11.2.0.4/dbhome_1/dbs/snap_dbname.cf';

Rman backup script asm (Full and Incremental)

https://gist.github.com/sravanrox/6e2bacd5413da1276ce8009742c9a1eb

Archive log backup script non-asm

https://gist.github.com/sravanrox/933bb8586706359004349d791dd8c953

Archive log backup script asm

https://gist.github.com/sravanrox/7bbef3f96883334892c1dafb73c858ee

In the ASM script for archive backup, We need to call following script which will check current ASM filesystem usage



Note:
Non ASM scripts are from Solaris
ASM scripts are from RHEL

Friday, September 2, 2016

Rman Estimated time query

To check the estimated run time of a currently running backup, use the following queries. If it is a RAC database, use gv$ views instead of v$.

-- Percentage complete for each in-flight RMAN operation, from
-- V$SESSION_LONGOPS (rows where work is still outstanding).
-- NOTE(review): the commented filter references PNAME, but the usual
-- aggregate-row filter is on OPNAME — verify before re-enabling it.
SELECT SID, SERIAL#, CONTEXT, SOFAR, TOTALWORK,
       ROUND(SOFAR/TOTALWORK*100,2) "%_COMPLETE"
FROM   V$SESSION_LONGOPS
WHERE  OPNAME LIKE 'RMAN%'
--AND    PNAME NOT LIKE '%aggregate%'
AND    TOTALWORK != 0
AND    SOFAR <> TOTALWORK;


-- Estimated completion time for running RMAN work: percent done and the
-- projected finish timestamp (TIME_REMAINING is reported in seconds, so it
-- is divided down to days before being added to SYSDATE).
select lo.sid,
       lo.opname,
       to_char(100 * (lo.sofar / lo.totalwork), '990.9') || '%' pct_done,
       sysdate + (lo.time_remaining / 60 / 60 / 24)            done_by
  from v$session_longops lo
       inner join v$session se
               on se.sid     = lo.sid
              and se.serial# = lo.serial#
 where lo.sid in (select sid
                    from v$session
                   where module like 'backup%'
                      or module like 'restore%'
                      or module like 'rman%')
   and lo.sofar != lo.totalwork
   and lo.totalwork > 0
/

Thursday, August 18, 2016

Creating scheduler jobs in oracle database

Take, for example, a case where you want to gather database statistics weekly on the whole database.

You can do it either by a shell script and running it in Crontab

or

You can use DBMS_SCHEDULER package to create a scheduler job

Here i show how to use dbms_scheduler and setup stats job.

First create a procedure in the schema you are trying to run the job from


 -- Gathers optimizer statistics for the entire database with fixed settings:
 -- 10% sampling, degree-6 parallelism, histograms disabled (SIZE 1),
 -- cascade to indexes, SYS schema excluded, dependent cursors invalidated.
 CREATE OR REPLACE PROCEDURE SYS.manual_full_db_stats AS
 BEGIN
     SYS.DBMS_STATS.GATHER_DATABASE_STATS(
         granularity      => 'DEFAULT',
         options          => 'GATHER',
         gather_sys       => FALSE,
         estimate_percent => 10,
         method_opt       => 'FOR ALL COLUMNS SIZE 1',
         degree           => 6,
         cascade          => TRUE,
         no_invalidate    => FALSE);
 END;
 /


Once the procedure is created you can create a scheduler job.. If you want to use plsql block instead of a procedure just modify the below code and create the job
    ,job_type    => 'PLSQL_BLOCK'  
    ,job_action   => 'BEGIN   
  SYS.MANUAL_FULL_DB_STATS;  
  COMMIT;   
 END;   
 '  


Main code
 -- Creates SYS.MANUAL_STATS_JOB: a weekly DBMS_SCHEDULER job (first run
 -- 2016/08/20 14:00 America/Los_Angeles) that executes the stored procedure
 -- SYS.MANUAL_FULL_DB_STATS, then adjusts job attributes and enables it.
 BEGIN  
  SYS.DBMS_SCHEDULER.CREATE_JOB  
   (  
     job_name    => 'SYS.MANUAL_STATS_JOB'  
    ,start_date   => TO_TIMESTAMP_TZ('2016/08/20 14:00:00.000000 America/Los_Angeles','yyyy/mm/dd hh24:mi:ss.ff tzr')  
    ,repeat_interval => 'FREQ=WEEKLY;INTERVAL=1'  
    ,end_date    => NULL  
    ,job_class    => 'DEFAULT_JOB_CLASS'  
    ,job_type    => 'STORED_PROCEDURE'  
    ,job_action   => 'SYS.MANUAL_FULL_DB_STATS'  
    ,comments    => NULL  
   );  
  -- Do not auto-restart the job after a failure or instance restart.
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'RESTARTABLE'  
    ,value   => FALSE);  
  -- Turn off scheduler run logging for this job.
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'LOGGING_LEVEL'  
    ,value   => SYS.DBMS_SCHEDULER.LOGGING_OFF);  
  -- Clear failure/run limits so the job is never auto-disabled by them.
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE_NULL  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'MAX_FAILURES');  
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE_NULL  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'MAX_RUNS');  
  -- Let an in-flight run continue past the close of its window.
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'STOP_ON_WINDOW_CLOSE'  
    ,value   => FALSE);  
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'JOB_PRIORITY'  
    ,value   => 3);  
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE_NULL  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'SCHEDULE_LIMIT');  
  -- Keep the job definition after completion instead of dropping it.
  SYS.DBMS_SCHEDULER.SET_ATTRIBUTE  
   ( name   => 'SYS.MANUAL_STATS_JOB'  
    ,attribute => 'AUTO_DROP'  
    ,value   => FALSE);  
  SYS.DBMS_SCHEDULER.ENABLE  
   (name         => 'SYS.MANUAL_STATS_JOB');  
 END;  
 /  

PS: Some time you may get error saying job not found or insufficient privileges

 ORA-20000: Insufficient privileges to analyze an object in Schema

 For this you need to give the grants to the user who is running it..

 grant ANALYZE ANY to username;
 grant SELECT ANY TABLE to username;

Thursday, August 11, 2016

Purging sql from shared pool

To flush a sql or purge a sql from shared pool you need to find out the sql_id you are trying to purge..

Once you get the id

run the following command

select address, hash_value from v$sqlarea where sql_id like '8ktqrp3vwp51w';

This will give you address and hash_value of the sql

Now insert these values in the following query and execute

exec dbms_shared_pool.purge('0000000B6ECC55E8, 2891302865','C');

Some times when you execute above you will get the following error

*
ERROR at line 1:
ORA-06550: line 1, column 7:
PLS-00201: identifier 'DBMS_SHARED_POOL.PURGE' must be declared
ORA-06550: line 1, column 7:
PL/SQL: Statement ignored


If this is the case

Goto $ORACLE_HOME/rdbms/admin

sqlplus / as sysdba

@dbmspool.sql

After this run the exec command again and this should fix the issue..


You can also flush the entire shared pool using the following command

alter system flush shared_pool;


Wednesday, August 10, 2016

creating sql baselines in oracle 11g

First create a test table

-- Demo table for the SQL plan baseline walk-through below.
CREATE TABLE test (
  id           NUMBER,
  description  VARCHAR2(50)
);



Insert 10000 rows into it using the following code

-- Seed the demo table with 10,000 rows; description is left NULL.
BEGIN
    FOR i IN 1 .. 10000 LOOP
        INSERT INTO test (id) VALUES (i);
    END LOOP;
END;
/


Gather stats on the test table

EXEC DBMS_STATS.gather_table_stats(USER, 'test', cascade=>TRUE);


Check the explain plan for the following query.. It should do a full table scan

SELECT description
FROM   test
WHERE  id = 13;


Find the sql_id for the above sql

-- Locate the cached cursor (and its SQL_ID) for the test query, filtering
-- out this lookup itself and any EXPLAIN statements that also match.
SELECT *
FROM   v$sql
WHERE  sql_text LIKE '%test%'
AND    sql_text NOT LIKE '%dba_sql_plan_baselines%'
AND    sql_text NOT LIKE '%EXPLAIN%';


 Since we are loading the plan manually in this scenario, it is called a manual baseline. There is also a parameter that enables system-wide automatic capture of baselines: set OPTIMIZER_CAPTURE_SQL_PLAN_BASELINES to TRUE (the default is FALSE). Please don't enable it on production systems without proper testing.

 Manual method

 -- Manually load the cached plan for SQL_ID gat6z1bc6nc2d from the cursor
 -- cache into a SQL plan baseline, then print how many plans were loaded.
 SET SERVEROUTPUT ON
DECLARE
  l_plans_loaded  PLS_INTEGER;
BEGIN
  l_plans_loaded := DBMS_SPM.load_plans_from_cursor_cache(
    sql_id => 'gat6z1bc6nc2d');
 
  DBMS_OUTPUT.put_line('Plans Loaded: ' || l_plans_loaded);
END;
/

After the above baseline will be loaded..

You can check the loaded baselines using the following query

-- List captured baselines; ENABLED and ACCEPTED should both read YES for
-- the manually loaded plan.
-- Fix: the statement had no terminator, so SQL*Plus would never execute it.
SELECT sql_handle, plan_name, enabled, accepted
FROM   dba_sql_plan_baselines;

You can see in the output.. It will be yes for both enabled and accepted.


This completes the manual loading of baselines.

To test if baseline is working.. Flush shared pool( not recommended on production again) this is for hard parsing..

ALTER SYSTEM FLUSH SHARED_POOL;

Now create index on the table

CREATE INDEX test_idx ON test(id);

Gather stats

EXEC DBMS_STATS.gather_table_stats(USER, 'test', cascade=>TRUE);

Check the explain plan for the following query.. It should do a full table scan even after creating index..

SELECT description
FROM   test
WHERE  id = 13;

Now run the following query..

-- Re-check baselines after creating the index: the newly captured plan
-- appears with ACCEPTED = NO until it is evolved.
-- Fix: the statement had no terminator, so SQL*Plus would never execute it.
SELECT sql_handle, plan_name, enabled, accepted
FROM   dba_sql_plan_baselines;

You will see a new baseline, but it is not yet accepted. This is the plan for the new index we created, but it won't be used because it is untested: Oracle uses a new baseline plan only after verifying its performance is better than the existing accepted baseline.

So to test that and evolve the new plan we grab the sql_handle from dba_sql_baselines and run the following..
SELECT DBMS_SPM.evolve_sql_plan_baseline(sql_handle => 'SQL_7b76323ad90440b9') FROM   dual;


Now if you run the above select query and check explain plan it should be using the plan with index.

Wednesday, August 3, 2016

ORA-02327: cannot create index on expression with datatype LOB

I got this error when i am trying to rebuild an index on a different tablespace.

Here is the query i ran..

Alter index  xxx.SYS_IL0000194446C00010$$ REBUILD tablespace xxx_data
Error at line 1
ORA-02327: cannot create index on expression with datatype LOB

Previously these tablespaces are in Users tablespace.

So in order to move them we should do the following since these are LOB's


-- A LOB index segment cannot be relocated with ALTER INDEX ... REBUILD
-- (ORA-02327, shown above); move the LOB column's storage instead — the
-- LOB index follows the LOB segment to the new tablespace.
ALTER TABLE xxx.TBL_REQUEST_STATUS_DETAIL MOVE LOB (TECH_ERROR)
STORE AS (TABLESPACE xxx_DATA);

After running this i was able to move them successfully



my reference:

ALTER TABLE xxx.TBL_REQUEST_STATUS_DETAIL MOVE LOB (TECH_ERROR)
STORE AS (TABLESPACE xxx_DATA);
ALTER TABLE xxx.TBL_REQUEST_STATUS_DETAIL MOVE LOB (REQUEST_BODY)
STORE AS (TABLESPACE xxx_DATA);
ALTER TABLE xxx.TBL_REQUEST_STATUS_DETAIL MOVE LOB (RESPONSE_BODY)
STORE AS (TABLESPACE xxx_DATA);
ALTER TABLE xxx.TBL_REQUEST_STATUS_DETAIL MOVE LOB (ERROR_MESSAGE)
STORE AS (TABLESPACE xxx_DATA);

Friday, July 29, 2016

Configure and Install Oracle Application express for 11g database

Installing oracle application express

Usually oracle express will come pre installed in the oracle database

To check if it is installed you need to query dba_registry

select * from dba_registry where comp_id='APEX';

If it is installed note the version and all you need to do is configure it.

For configuring follow these steps

Go to the $ORACLE_HOME/apex directory.

sqlplus / as sysdba

@apxconf.sql

Enter values below for the XDB HTTP listener port and the password for the Application Express ADMIN user.
Default values are in brackets [ ].
Press Enter to accept the default value.

Enter a password for the ADMIN user              []admin_password
Enter a port for the XDB HTTP listener [8080]
...changing HTTP Port

ALTER USER ANONYMOUS ACCOUNT UNLOCK;

Check port usage using the following command.

select dbms_xdb.gethttpport from dual;

Usually the above gives 0. If thats the case run the below to change port to 8080

Enable Oracle XML DB HTTP server

EXEC DBMS_XDB.SETHTTPPORT(8080);
COMMIT;

This completes setup. You can login to admin console from following location.

http://host:port/apex
http://host:port/apex/apex_admin — Admin console


To upgrade Apex to latest version download from oracle support. In my case i am upgrading from version 3 to version 5.

apex_5.0.4_en.zip

unzip apex_5.0.4_en.zip

I unzipped the file to /opt/app/oracle


Sqlplus / as sysdba

Make sure the ANONYMOUS, APEX_PUBLIC_USER and APEX_030000 users are unlocked

Check default tablespace and temporary tablespace of the users which are usually sysaux and temporary


Once this is done, Navigate to unzipped folder and run the following

cd /opt/app/oracle/apex

sqlplus / as sysdba

@apexins.sql SYSAUX SYSAUX TEMP /i/


This takes some time to complete

After installation is complete you need to setup admin password. You need to do this again even if you did it earlier because user needs to be configured based on new version

@apxchpwd.sql

================================================================================
This script can be used to change the password of an Application Express
instance administrator. If the user does not yet exist, a user record will be
created.
================================================================================
Enter the administrator's username [ADMIN]
User "ADMIN" exists.
Enter ADMIN's email [ADMIN]
Enter ADMIN's password []
Changed password of instance administrator ADMIN.


After this is done there is one more final step to load images

To load images give the directory without apex as below.

sqlplus / as sysdba

@apex_epg_config.sql /opt/app/oracle

This takes around 2 minutes.

This completes the upgrade. You may login using the web address. Sometimes you might need to restart db for the changes to take effect



Friday, July 8, 2016

[Off topic] How to calculate your lease payment and negotiate lease deal

First visit all the dealerships and test drive the vehicles you like. Just take basic quotes from the dealers and do not buy at that point.

Once you have driven all the cars narrow down based on what you like. After you know what car you liked the most start research on the car.

Hit to edmunds forums and go to the vehicle specific forums and see what others are paying. Also request money factor and residual value of the model you would like to buy and one of the moderator will post the values for you.

Now visit specific car related forums. In my case i am planning to get a 2016 Honda civic touring.

So i went to civicx forums to see what are the possible issues people are facing and see if there are any TSB's to fix the issue. Make sure you check these issues before you finalize your purchase.

In addition to that see what prices people are paying around your area as they have state specific forums in civicx for example.

Once you get an idea just go to truecar and get the price for your model. Just use this as reference.

Also in edmunds you can check the invoice price of the vehicle.

The sticker price for civic touring $27335 ( Including destination fee)

Invoice price is around $24600 + $835 destination fee

Keeping these in mind i want to target price less than invoice including destination for $24500

From here i started requesting quotes online from all dealerships around me in 30 mile radius.

After negotiations over the phone and emails i am left with prices ranging from 24100 to 24900

I send this 24100 quote i got from the dealer who is quite far from my place to nearest dealer to see if he can beat it and i said i will be coming in today for purchase.

Instantly i got a call saying to bring the printed quote and they will beat it by $50 bucks.

So I headed to the dealership, and when I told them this was for a lease, they said lease calculations are done differently. Don't hesitate to walk out — they will call you back in most cases. They will try to come up with some numbers and pressure you, but be firm and ask them for the money factor (multiply this value by 2400 to get the interest rate) and the residual value (the value the finance company estimates the car will be worth at the end of the 3-year lease).

So For honda civic touring i was told residual is 57% and MF is 0.00070(only if credit score above 760)


Do the math dont panic and let dealer match the price. This is how you calculate

Residual value is always calculated on MSRP or sticker price

Since dealer said 57%

27335*(57/100) = 15580.95

So your car is worth $15,580.95 at the end of the lease. If you want, you can buy the car for that price when the lease ends.

Money factor i was told is 0.00070

multiply money factor by 2400 to get interest rate. This is just for your reference,

In this case it is 0.00070*2400=1.68%



For lease you pay the depreciation of the car and the interest

MSRP=27335
Negotiated price= 24050
Residual Value= 15580.95
Lease acquisition fee =$595

So Total price you negotiated= 24050+595=$24645


Lease acquisition fee is mandatory when you are leasing


So first calculate this if its for 36 month lease

Depreciation = (24645-15581)/36 = $251.77

Interest = (24645+15581)*0.00070 = $28.15

Add these two

$251.77 + $28.15 = $279.92 + tax

These are the monthly payments you need to pay for 36 months

Our tax rate is 8% so it is $22.39


So total including tax per month is $302.31 for 36 months including tax.

Don't panic; be clear about your numbers and don't overpay.

In addition to this when you drive of you need to pay dealer, title and documentation fee. In California it is less than $400

Happy Leasing. Leave comments if you have any questions.





Wednesday, July 6, 2016

Basic procedures to give db level grants to users

Some times we need to grant a particular db user to kill sessions and for that we need to  grant alter system access which is not a best practice. So to over come we create a simple procedure to do the required actions and grant execute permissions to the user on that procedure.

Here are few examples..

Killing session procedure

-- Kills the database session identified by (v_sid, v_serial) using
-- ALTER SYSTEM KILL SESSION ... IMMEDIATE. Granting EXECUTE on this
-- procedure lets a user kill sessions without holding the much broader
-- ALTER SYSTEM privilege.
-- Fix: removed the unused local variable v_varchar2.
CREATE OR REPLACE PROCEDURE kill_db_session
  ( v_sid number, v_serial number ) AS
BEGIN
  -- Both inputs are NUMBERs, so concatenating them into the dynamic
  -- statement cannot inject arbitrary SQL.
  EXECUTE IMMEDIATE 'ALTER SYSTEM KILL SESSION ''' || v_sid || ',' || v_serial || ''' IMMEDIATE';
END;
/

Refresh materialized view under a different user

-- Wrapper so EXECUTE can be granted to a user to perform a complete ('C')
-- refresh of materialized view xxx without wider privileges.
-- NOTE(review): 'xxx' is a placeholder mview name — substitute the real one.
CREATE OR REPLACE PROCEDURE SSS.REFRESH_MV AS
BEGIN
    DBMS_MVIEW.REFRESH('xxx','C');
END REFRESH_MV;
/


Here xxx is tablename and C means complete refresh

Make sure you give grants on tables explicitly for mviews or else they will throw table not found error

Friday, June 17, 2016

Exadata IORM (Incomplete)

alter iormplan dbplan=((name='DDWH', level=1, allocation=10),(name=other, level=1, allocation=90))


alter iormplan active


LIST IORMPLAN detail

alter iormplan objective=auto;

alter iormplan objective=basic;


Example 6-12 Resetting Default Values in an Interdatabase Plan

CellCLI> ALTER IORMPLAN dbPlan="", catPlan=""
CellCLI> ALTER IORMPLAN dbPlan=""
CellCLI> ALTER IORMPLAN catPlan=""


[root@xxx01 ~]# cellcli
CellCLI: Release 12.1.2.3.1 - Production on Mon Jun 13 16:33:04 PDT 2016

Copyright (c) 2007, 2016, Oracle.  All rights reserved.

CellCLI> list iormplan detail;
         name:                   xxx01_IORMPLAN
         catPlan:
         dbPlan:
         objective:              basic
         status:                 active






CellCLI> list iormplan detail;
         name:                   xxx01_IORMPLAN
         catPlan:
         dbPlan:
         objective:              basic
         status:                 active


Manage IO distribution across databases

alter iormplan dbplan=((name='DDWH', level=1, allocation=1),(name=other, level=1, allocation=99))

alter iormplan objective=auto;

alter iormplan active




Reset back to default

alter iormplan objective=basic;


CellCLI> ALTER IORMPLAN dbPlan="", catPlan=""

alter iormplan active

**************************************************************************
To set a plan

change objective from basic to auto

Here allocation is ( Only during contention )
xxxdr will get 35% io
xxdr will get 22% io
xwh will get 22% io

Remaining all db's will be in level 2

alter iormplan objective=auto;


alter iormplan dbplan=((name='XXXDR', level=1, allocation=35),(name='XXDR', level=1, allocation=22),(name='XWH',level=1,allocation=22),(name=other,level=2,allocation=100))


Another scenario from oracle note

suppose we have 3 databases sharing the Exadata storage cells in a hosted environment. Databases 'sales' and 'finance' are more important than 'hr', so we'd like to give them more I/O bandwidth when there is I/O contention. We therefore set the shares in a 4-4-1 ratio. This means that when there is I/O contention, 'sales' and 'finance' will get equal I/O bandwidth. They will each also get 4x the bandwidth of 'hr'. If only one database is active, then it can consume all of the I/O bandwidth.

alter iormplan dbplan = -
  ((name=sales,   share=4), -
   (name=finance, share=4), -
   (name=hr,      share=1), -
   (name=default, share=1));


Featured Post

Apply Patch 22191577 latest GI PSU to RAC and DB homes using Opatch auto or manual steps

Patch 22191577: GRID INFRASTRUCTURE PATCH SET UPDATE 11.2.0.4.160119 (JAN2016) Unzip the patch 22191577 Unzip latest Opatch Version in or...